BIND 10 trac510, updated. 8dcf5eebb1b81e6cdc963985daa6c80497ac8c16 [510] forgot to include changes

BIND 10 source code commits bind10-changes at lists.isc.org
Thu Dec 8 10:02:56 UTC 2011


The branch, trac510, has been updated
       via  8dcf5eebb1b81e6cdc963985daa6c80497ac8c16 (commit)
       via  662233a1483040da5dbc29dd9c9baf6bf0832223 (commit)
       via  b41b7dc34a8a14339a1ff9daf1d705997d9abc43 (commit)
       via  6bda5426c6f8b4e9faefc2075575e1c98bc3907c (commit)
       via  6ff03bb9d631023175df99248e8cc0cda586c30a (commit)
       via  b7b90e50531bcbd2caaffe6b51aea8917a56f40d (commit)
       via  fd5713eae0276a3d623953c88fc6281aab0b71d3 (commit)
       via  4c0fd2ba248e5f925566e02724b34c85179a8c51 (commit)
       via  d408808ea8e4fe24a21d13992a88ad62c22634b8 (commit)
       via  cce182ed937f7294b2a11a3af5b2e03a380b7cd5 (commit)
       via  49f1f1bc36042d4b4c27a002e963f400deb694d7 (commit)
       via  862b0e38047101afef0f6d81ba3f0d74c9a51ea5 (commit)
       via  cf5e4481b1f1fb00e9897e4cd0527a9a707c4a63 (commit)
       via  1d5d7d8ea8d4dae23783b71e4a93165a36124663 (commit)
       via  696f576c4743130bc8a77844c700582d5faaf391 (commit)
       via  7a1997b2d44d05c179adecad0bf68c1f7c19935f (commit)
       via  e580e44a38c8dcf02e6ff40ba5977f18c8200572 (commit)
       via  11885ff9e91c98ff0c4e93d81fc2b3d47a02090d (commit)
       via  0c53ade72d9589d3d521937652af20f9d7a20f8e (commit)
       via  cc20ff993da1ddb1c6e8a98370438b45a2be9e0a (commit)
       via  b3c9fffb335e7a42ff07a99016df46ccaf3dae97 (commit)
       via  50fdb098fc80146a714794cb9156ac03de909103 (commit)
       via  a4f7763150aa2ced077f67bddbaa39255bd2fbf9 (commit)
       via  d3792aa7fc6ca920c7f9a3f36318ea1160974850 (commit)
       via  9351ba5d5461f8f93a169a99a5e378416a970bd5 (commit)
       via  8376919647ef84268085bfdd56271714416a6405 (commit)
       via  4e636c3e9a365304fc463929cdddcd555dcb3ad2 (commit)
       via  73721a97b1f3741bf58bf774601fef99a4ecb54f (commit)
       via  6764e7da0d5c6967e5607fc6e31c112895ed1827 (commit)
       via  f9cbe6fb6e0a3d3dc03218429f530e6c01920169 (commit)
       via  18128a99fd70d66eb09312dd8dfa0f0521033f97 (commit)
       via  173de1cea65293e5f7cfb904454ee4fa96c51e1d (commit)
       via  e533dc83ccb7bf541e53f753c28a52248d7b195b (commit)
       via  2ae72d76c74f61a67590722c73ebbf631388acbd (commit)
       via  1ecbd2b16b44b6439030fd245f951fe5a538ecc5 (commit)
       via  83ce13c2d85068a1bec015361e4ef8c35590a5d0 (commit)
       via  5f34cb40976859771ab996113f78937310e7bda5 (commit)
       via  8ee52fc304388aef162a819e2e59066bb70f0618 (commit)
       via  3fdce88046bdad392bd89ea656ec4ac3c858ca2f (commit)
       via  6690b8985d656aba3f25082cb62c9c02e5ad5a0b (commit)
       via  6d921ed561b6ef9d26273ca321dfa24622a982b5 (commit)
       via  144549c04afdc36a98c7530eaebafb2b3d38d545 (commit)
       via  4464612807e6c4bd120298ca105b0503af0d3110 (commit)
       via  0b5da8bd0800bfa3744e23c367cee2c38de7a497 (commit)
       via  f06cabd31d8e43781e4e32bdfdf24c78931d3ca8 (commit)
       via  0e945f09e0e127e5097c32e5c84d96e34a18b3b6 (commit)
       via  c9be5877151f0564725d1cd9a20fe393fe7b422f (commit)
       via  560122414abc11fa2a39331734c607cc37a4e76c (commit)
       via  5468c7defd530b29696108cbda6d278b14be351b (commit)
       via  64853ae0cca2070a0536ee6f499084c8a9017fa2 (commit)
       via  1d8a592d1301b7e3a39c88ce1e001122db125307 (commit)
       via  9682bf18607745c83437cd4592d3289e68410772 (commit)
       via  537af1705fc5c1695b4b601571f65ead81dc1289 (commit)
       via  ada65c7115b9840f509f2b0c640368735fe69520 (commit)
       via  966fdcc69001cd2562ca96b392b9a45e7c336224 (commit)
       via  c789138250b33b6b08262425a08a2a0469d90433 (commit)
       via  e665914a467810569a22e093a56ff5c711179143 (commit)
       via  5b64e839be2906b8950f5b1e42a3fadd72fca033 (commit)
       via  4dc636c8d46d857f617bfef2ae9444dce438cff4 (commit)
       via  eb06cb8dfea727c5d9366583581ca674d23c4c2e (commit)
       via  e35e9b8a1cef995079ef15b0321aa7b420139226 (commit)
       via  5de824f59cd2ba7e8cbb3cc58c4cd42c585c09c3 (commit)
       via  8bab1dfcb1f33d58bf64f4c86ca7ba860b57cc76 (commit)
       via  5e07929601d0799df76eaaf3ac5165b634efc556 (commit)
       via  81f62344db074bc5eea3aaf3682122fdec6451ad (commit)
       via  eb6053d466fcea08fa66205d598d316e550863c8 (commit)
       via  42d4a37a121ea7df3440268fe15995267fb66b12 (commit)
       via  1b186f0a6fc242fa6dff08944ef43b60010d3631 (commit)
       via  1825a24fe6fcda419cf2cdcd05180aa1b18ca526 (commit)
       via  fa7208a015545459cf56b03001fa1e6681e52d3a (commit)
       via  b6568546ccdac044fd30200a54708f9418e7af9d (commit)
       via  de9532ce586ac69ff58dad2096f23db0cb062639 (commit)
       via  a81436b0c8f9c3974ec3373f586c45e2cf03cb64 (commit)
       via  1a81569fb7c422d499f5a8eeef2d70d20e3284c6 (commit)
       via  07b884ef0f72044fa5a5fd661ab068794ff68ca6 (commit)
       via  207038a3ff7503cd2b2ab44238c71da55912bb4a (commit)
       via  97cf501e33b45c373aa12a3cb8ae76909d3522bc (commit)
       via  4ca30d27a1149bf5c445f382c4767b5c4e168d95 (commit)
       via  f6def2435fe72e00a782244461e8a186a4a23e63 (commit)
       via  c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca (commit)
       via  75fc5817606b892c8eeb4964dc2f3d2f044f5491 (commit)
       via  64c2d16fff1dd9e903378a55087843ad058791f5 (commit)
       via  7ab1afe9a76986c4f175c338fdd6a8076a9d6dc9 (commit)
       via  e99a54597a5bb6dde1a0240ab74ac010b5029afb (commit)
       via  f02b9adf8e899f9358a26e087cfb43a5d4657b07 (commit)
       via  3d5f2c3c14bcbf9cb7441f61ac8f84bceb8e6594 (commit)
       via  c1171699a2b501321ab54207ad26e5da2b092d63 (commit)
       via  8f5429e41cb99c615abc1de0ee549702ca638217 (commit)
       via  52fa244c12eb6d708b0260b5554396df5d00b079 (commit)
       via  3e1a6afcabbef24f9501d8f4e3ed27d824db4309 (commit)
       via  ce54ff203a48042d3fa085037a23b315ccc2ecca (commit)
       via  8fe1dbdadf8ce7aa288ae08320f315ab56433cb6 (commit)
       via  f76fdc8db2c0c9eba1603af7fa0272e7955e20d8 (commit)
       via  6247db1cab96103bc06a6a281963227084cfb68d (commit)
       via  a9dc55c6cc18e2ed28f79cfbbdf7408a64a04ca4 (commit)
       via  1dddec95f5e398269b28473a094dd6ad00ce648b (commit)
       via  9150be5e5d0d6c0a46e44a0bbcdbd235c74bd6a7 (commit)
       via  f11f46b276646364fc115783ccc3d706510a2ee8 (commit)
       via  5dc772648fb9779359b4d409086d55745afccad9 (commit)
       via  151a4b05228e38b2031d095e438b63ae75dc0b76 (commit)
       via  3924f73fed9f8158918713b09672175f09a973e4 (commit)
       via  411a806a41666b522ed35552588789d114cc1390 (commit)
       via  ab3f90da16d31fc6833d869686e07729d9b8c135 (commit)
       via  e12070c73b529d348f64f8f6e24d75ce710a8a12 (commit)
       via  710e8207090f894b14eaa9834a9c6cd551ea950d (commit)
       via  80c131f5b0763753d199b0fb9b51f10990bcd92b (commit)
       via  a01eb512f67a14855fc9be9fff561c3c86634e0b (commit)
       via  635662711c673bbcfc8fac95c96cfdc33702ca94 (commit)
       via  15e23bca2cf7f266d32c6bb30a142a80ee543227 (commit)
       via  ec1cc2b4be6e19519644534889865a3ee2c81a8a (commit)
       via  277b80e0671586d8ace205cb53465b1f6f414466 (commit)
       via  a435f3ac50667bcb76dca44b7b5d152f45432b57 (commit)
       via  6dd270220a4bac70fa4cd6a898e331b658fe0af2 (commit)
       via  1bb5168b7014a83690d1bb363dbcc0fa6d8fd7f1 (commit)
       via  ddb6d109c0947f203eaa6265a22d2fb3b166db0b (commit)
       via  2eb9f486619e27aee0684f840c85d152b3ddfe0f (commit)
       via  71378c1048bb610c748788dabfd04e421f6b4ac0 (commit)
       via  de43982b90d0fafd6b4e1857e366a6cd983cfab7 (commit)
       via  77d69c99f2b3cc8ee627d8f73174ead9f03da412 (commit)
       via  3ff33cfedcca0cd1acb80a5cf2651f89403a82a9 (commit)
       via  cf297878998c80018ba9523a53ae4947fc7e6a5e (commit)
       via  52f4e6faf56afb5c0432f88d5b1528090530c62e (commit)
       via  13f108b25fbccc56b731bd5bcc505cdf48e91e91 (commit)
       via  4d3aef6a83965e26781d6b74f0ff913926845c7c (commit)
       via  fb33c8d379f9e75b82edafff45d4dc13fda62630 (commit)
       via  4f02b45248227dd98904b61bbcd2e6cff36b5fd6 (commit)
       via  54d9d7c1597df3bcdf47d07db040f63f7008c6a7 (commit)
       via  48c07943ac1dd24922f46cf970c214b5cf24813f (commit)
       via  bea7b0e3fde35a335bb9e6cf170b0fc240650275 (commit)
       via  9b1c64b7d164b6b27d126e55391b2bbafeaf8c00 (commit)
       via  96bf3ab5271347542e13b52e2c37b9c8810a6fad (commit)
       via  c59bb2dcd90a5d580a7f3c9e42a54a080f763add (commit)
       via  319bc2d65301606aa938363dcb30a8519755886e (commit)
       via  d953caeeaf821743ed27ef4a47a45bef66615dc9 (commit)
       via  5d382b4295b8455fae844a5ca94886788f6cb19b (commit)
       via  d08c42ad20f2c91bf64ef47ed893fa2aac4ff037 (commit)
       via  8b92bb931e29b7b1bbb8147cda4f7d0aac507ac1 (commit)
       via  08915b387e64f3cf9d9a86a5a21c4492db3a488c (commit)
       via  1d4541dfd067cd2f0c9e155049c2b7f9d70fa896 (commit)
       via  1f6edd11fbf7e0143f99f20fc714044b989b299a (commit)
       via  ecf6a71b5845c6710119dd97b500c7edeb3f44c2 (commit)
       via  a24c6579ab039afd67ecb50a71b9fc8eabf9b6c7 (commit)
       via  3647e8ff9c194c1c0a576558f4f49ba4ff2614e7 (commit)
       via  c3d71baca757b39e13968369e0afb39dd4472eb8 (commit)
       via  a9040d4aba8e3c01a77236c81f07e2b06b300918 (commit)
       via  35556de064c193779c3cd5e5b0fde583f4a8d598 (commit)
       via  c4f22c20ee19e1ffba43914671c059a434f4518c (commit)
       via  12b72af07f5e06cf172b115b0acba3fbe3554467 (commit)
       via  ecd9c5fc4b3cf747e2b5a221504feac3adeb236e (commit)
       via  fc0a31681f7a8e4198068be0038eb9a4f8a74ec7 (commit)
       via  d3db538710b6547cc2e04127fb5fc9d2d5a181f9 (commit)
       via  2ab2fd55d4a12d1469060a3657893121114e2e2f (commit)
       via  2dd7ee33a13a07a00e22fbc81ecb8b19b57efa8f (commit)
       via  5cea4cfbee9770f4299f5a701af89f7cbf977ef4 (commit)
       via  1af57091dc0c38cff538de2470275f25caeb2eab (commit)
       via  256c0a08483ac2bf396dfa8424b4c02f0681a0f4 (commit)
       via  8f74718cc2012ca68a44d9ed9996f479c6834101 (commit)
       via  5c92f567d93977bd56a7ed2898c7bee098b552ab (commit)
       via  956a0a589db0a8250ec94ece377657783ac15caf (commit)
       via  39def1d39c9543fc485eceaa5d390062edb97676 (commit)
       via  bcb432839cacdf10172d49dec94292871aee3526 (commit)
       via  164d651a0e4c1059c71f56b52ea87ac72b7f6c77 (commit)
       via  09f6d6281a4203a91dcfb6c56e240c06f11935b6 (commit)
       via  76fb414ea5257b639ba58ee336fae9a68998b30d (commit)
       via  e5f37058b67c641b8eb024bd48ca269ae9e41163 (commit)
       via  934a07b6d0ebec8bab258398894905de32878a8b (commit)
       via  40f6dd2b378f31f4ec561eeeac534874a02a8ae8 (commit)
       via  84fa061af28d72e51939039bfcbb04e1febc3cb1 (commit)
       via  b54f1b460285db4d6ae89dd716098a88363b1511 (commit)
       via  c1138d13b2692fa3a4f2ae1454052c866d24e654 (commit)
       via  35b1914ce6ab5481ce40f584729d0949746c2219 (commit)
       via  4df29b3303dbce85b8143d8d74935b3c9283fb31 (commit)
       via  33a956b09f22597d91929b22542913412757e279 (commit)
       via  ed91f985331705fc345bec838697c9bda4b6b7e4 (commit)
       via  1219d81b49e51adece77dc57b5902fa1c6be1407 (commit)
       via  8380ccceca1b8412fbc6742cb37dbd7de843ac50 (commit)
       via  38d84c59fbc097e57d03ac10d6a83edc63c4cffa (commit)
       via  c0cc183880fc5e1949bcc97585c20ac2ab21e281 (commit)
       via  2d85e22f10321fbc5b9cd12f70e90907cb01830f (commit)
       via  1c9f121360e6e612d02d365d70bd0843f8f93457 (commit)
       via  d142274062ed21d53f9e0b2a85531c935580013c (commit)
       via  5de9e8a440b6b4ed8c6bbce156d75b740ec4c1b5 (commit)
       via  631c5c2d24ba8be2b12930cc8267b2298414d563 (commit)
       via  1b3e21e08311d84d649a2780471e9a8b46143dca (commit)
       via  ddf219d781a40764999bd8b19c80f607c2783b57 (commit)
       via  24c2111ed800e95bc62901cd3b2970692a205578 (commit)
       via  f9224368908dd7ba16875b0d36329cf1161193f0 (commit)
       via  4a68215905542025570f06fcc703fa44d6b37cfd (commit)
       via  b8f67d200e64a2a9931b6d664781caf835f2ecd4 (commit)
       via  315f4999df039dbb2baa77ee12afa0dfbe01dc25 (commit)
       via  7344d2788cd06e54ca7ca3e3a3f69010dac80670 (commit)
       via  46bd9a8e6e3a543f97af6213bc7e43d619064aa7 (commit)
       via  ce546dddcbbf7efc4778c1d0d4210ca139ed5bf9 (commit)
       via  fa89a0798d166574e089b38d7bd43a701eda5467 (commit)
       via  12b1a920f219e627bb5860f0a0217cc5c86749e5 (commit)
       via  cd342dae58399be6cdfad55a466a76ee385ccb08 (commit)
       via  f9e81512329b71d6b5d94bafa789c63e763b2a72 (commit)
       via  226dc3ee718e1320079d6c6d8642e0f0dda1bdef (commit)
       via  962a91763b9ef79e887e52e22fa23462ff7d680e (commit)
       via  170936d47b2e9ad3d5c3ceabf86026fca9795150 (commit)
       via  dbf32272f3b76b90678add39038fb6978c03ab3e (commit)
       via  3e19362bc1ba7dc67a87768e2b172c48b32417f5 (commit)
       via  295732d42d2b0a9641edfa352087033d8eff2794 (commit)
       via  758ab4461e8792e563ce1e0ad069d53b5e15d8dd (commit)
       via  b449ad20a4f58eb96aec8cd7dd7bb857bdb5d14b (commit)
       via  8279efec0dae2291665a99e4d489e8e5ef7a51c1 (commit)
       via  9f89f07adcc9ccdde454016f037076e04eb791c1 (commit)
       via  fdefb47da0a5d7203496738ba03d4e1737e8149e (commit)
       via  93a5d45d9c1aa90249494608b8c2829059cc3b28 (commit)
       via  c1f5fb059e9c272dedc27a3f14fa8ed2fec71b95 (commit)
       via  fd1ae8e05771b151877ae3c082a7b3e3b32a20c7 (commit)
       via  21887dffc4cd692ce23bfff1685fba0e2c1e55b0 (commit)
       via  c41c32c11999a34a46d2e20155358438d769f767 (commit)
       via  181926059b0162e09c30b4b967b09294d713918e (commit)
       via  466a968426ed9062d86239560492edf7dc72ee02 (commit)
       via  a59f28758abdb92721e010956bd421148643377b (commit)
       via  e09910d37b783b182ae2dc83f6cb272bff68cbb6 (commit)
       via  da3e9e54f1374d581d78f1d874ddafd427a622ab (commit)
       via  b34bf286c064d44746ec0b79e38a6177d01e6956 (commit)
       via  648a187c5d7181019dc19531a1057bc3e6f70e96 (commit)
       via  16b7feca0339f67acae30eb67d913bd3ef0298be (commit)
       via  120946aa30b22c36995135b7d5bfcade4c26e192 (commit)
       via  78770f52c7f1e7268d99e8bfa8c61e889813bb33 (commit)
       via  ff5154291678973eaa0483518302b74a62f0acba (commit)
       via  498677a8877e4894fad598f9ec99974c414ef58c (commit)
       via  c4c93896137dd936066cd1a714569468bf248451 (commit)
       via  713160c9bed3d991a00b2ea5e7e3e7714d79625d (commit)
       via  9bab697bc984a6565a6f0dfe8a981f4809edc91c (commit)
       via  ab406229e29b7cfc470142ee0166086bf70790a3 (commit)
       via  e24f557e8208f43a8ade0855395c87b175bc351c (commit)
       via  3f93372ba9416c9d759ea0c6d8981837c036448e (commit)
       via  fda23d6cf412c2a90df325c244f79811d939d3c7 (commit)
       via  b79e0ef1ad1ac5c64c8a131ea8e125ca6df066eb (commit)
       via  3d3592d4b1e7d3b0b3164067e57c1343db691851 (commit)
       via  32b1e0701a9b138321e510a432c5cdd49fa336c6 (commit)
       via  84290dae3201ee83c8e4aad6f7e2b181d708811e (commit)
       via  9b6f54409617896742151c6aab9f5f318b7f53c5 (commit)
       via  36a5cd751a12ccbd31284ea19d0b10e8a5836b70 (commit)
       via  f1cb067ea86ab38810007ec6743e7c1f91042e99 (commit)
       via  6ddab5f4ea56162d0834e22a68605a1a427cc8c2 (commit)
       via  cd4fd339a084dbfb1e2d35d5c008260de9d48572 (commit)
       via  e4b99333e4c9946741148b6c95ed070653bec0fe (commit)
       via  b0cb2b651ec620418e891db0d21791beadb81906 (commit)
       via  e9e0f96594eec741393fa197c1d91362c96109e1 (commit)
       via  96e0aa96b5a2fd31833e9afe64bb8e4cc34e23c5 (commit)
       via  48ee64bfbde99ce88eb305d2a751283b42c826ad (commit)
       via  cfecb1acb98f45a12864b7730ea58afbeb674c7b (commit)
       via  9ab6902f20b57452eaecf8f737d37f8dedcd623a (commit)
       via  d9be597335af84bc93c9559bbd76fa85ef0f49c4 (commit)
       via  8c57956e16dd09b528cd11dbf4c2fa51e48da359 (commit)
       via  e84f2aa5e9e493aa7dadfbd3b31753b5837d9069 (commit)
       via  dabf62d5444fe3a1e55e72aa393e0dddf188df7b (commit)
       via  ca3d2d1badee8e5e6d3c1f73fb29afdcc7692fa6 (commit)
       via  94ec743d73153258d8a231e2e5126749ea00e3c8 (commit)
       via  dca136175cf0dde67a63f40953187ca60f90caad (commit)
       via  625aea76304c024102cb5065f910e5121b1641f7 (commit)
       via  a4c51111cc0fc28c6517a11f8ae88682ab8e6996 (commit)
       via  8a5b3e3b460e7f741b1560f73423c8d688db9d85 (commit)
       via  275d091229e914a848408b785f0143541abed6d5 (commit)
       via  b5553ef764f6c8cb0acea25e14b6e7a6a3a4cd47 (commit)
       via  bdde86c05963e9d491015e906c1b899609417887 (commit)
       via  936e61c743af685c398abc7590cd813b70a5f5e5 (commit)
       via  038c8121cd5e6cdcda93c4b167b8b1e858ced3f5 (commit)
       via  eb53cae4b35f858436cc20bf28ad06cbdb2211ab (commit)
       via  868282b5bbeadf7ba0dda49cb9813a1cb5ad09e7 (commit)
       via  11a4fe2def28da2ae83c94647a11fbb2114ec467 (commit)
       via  c2213ce7281be2aed47023a6f052bbec868a6028 (commit)
       via  60f2c617c5951fd465eb094c5c7c82ae14995efb (commit)
       via  54d84160bf6ed66a7c86f9f9be8d66ff25f80884 (commit)
       via  045c30f0dffebb30ad8862986be435748ed0efb6 (commit)
       via  a6fd03e989a1fd5ae9514774bb3b3bb2a6668765 (commit)
       via  8c07f46adfdd748ee33b3b5e9d33a78a64dded10 (commit)
       via  235ff5af7733a7d464b172c4424f8facf284fed6 (commit)
       via  8f3f3e9da36c5a0cbbfa4e2a5ddc598be7fece4a (commit)
       via  fe04c9377836fcd387f79447906e7ec83911b5b2 (commit)
       via  43de15e4b0bd0094910ecc4f4365744cb6c1eeab (commit)
       via  5e2238494ec665b558a6bf3b6a2c7351c1e022ba (commit)
       via  755cd222be64019ea3b8db62e6d2643e6b6374c7 (commit)
       via  5720f2262f0a1e4b8b2dcb1b66b94431e0dc6ff2 (commit)
       via  8780f998204e96767785b29cd5b0e58cbeb10e1f (commit)
       via  cb74737554ee7e1bc3f03fc4112dee0d2b64d174 (commit)
       via  46c206bab683f816304054c3a3f9c21ffa0af2a1 (commit)
       via  0d94cca23a4f22d1bb953d62d38358a8b0e49f01 (commit)
       via  4215dabae27f7b9b089ff8fafef2ba5425062fc5 (commit)
       via  219879a5c8d6cb361d6d6f91d88c199930560994 (commit)
       via  7003eecf6f7792d140e74bac444fb00eb7b8415b (commit)
       via  81986f1f0af388bc75baf4fe26e29771f885f200 (commit)
       via  08e1873a3593b4fa06754654d22d99771aa388a6 (commit)
       via  90a1746c2d4da5b1a75ea76a7f0febc35b80c440 (commit)
       via  0878c77ba4bcbaeb509f2bb7c2d52ee62864dadc (commit)
       via  efeb506e624945c6f21755621897a088715045b7 (commit)
       via  fda514f6b5ff65648709273dc62f960d85f4e066 (commit)
       via  2afbc7d3564b16d49043d48fe5ed9dd343311861 (commit)
       via  ce28b51d36567d63b5258648f7fbe406baaa5677 (commit)
       via  9753568c850855beecaabf500aea33483369d64f (commit)
       via  7c6c725225eb89d9911b28aff0c6d80152e26aaf (commit)
       via  0ad9b8c8482a134af7c47b64b412f642d08ce642 (commit)
       via  132e0b02edf9a0cebccd64a183eb56839f42606f (commit)
       via  d0e0bab2c4e3ce4f60c893d3a89ec8c91e2f11e0 (commit)
       via  3b5532c40c4aa55288a8d2c23163525c34568819 (commit)
       via  2aac7b891f4ee43fa29bbd41ee3bd48c4a849010 (commit)
       via  46c4fc8c240445d0d7cb70a0b5ae17eff371c5db (commit)
       via  65f4be2b65bf19baad6bbeda742b44dff7cd9b4a (commit)
       via  a3ba4cca05891f1052aae6bbe28c125799c7fe6f (commit)
       via  bccc91bbd2496b87b408ebff3cd9c6880f952b1c (commit)
       via  88147da513fdb22eb4e430390746f36c96304c7e (commit)
       via  4dc03f5419813b974b9794aa2cba4f55557fbbb5 (commit)
       via  dc2ea48db152796f6c0f62641f00646ef32e2b9c (commit)
       via  b513f0ab652e11892c232b6170f675fbb9990609 (commit)
       via  bde035f1ebcb1a9c7678692538f9aec18f5232e6 (commit)
       via  3a330862f1357a4e0edd570de5896785029f4530 (commit)
       via  567f822d4758d13b84161d67118ac1bce08b4c47 (commit)
       via  f94f5bc089b09a77b34138bbf19ea71921a7950d (commit)
       via  e3406364189d62ba54d85c3d23b40cefd02af584 (commit)
       via  6da32eaece41f360a87388c44528dca979c10ab0 (commit)
       via  b85213cd68ec24c5deede886d466bf0911b9e762 (commit)
       via  056a1342f0d73cf53a37ed672a8a4ad907c4cfa2 (commit)
       via  3dcdc74a5e0f8cb7fd0c6a3f6dee480e30199f03 (commit)
       via  7fb9faf4602b6b4feff4c940942c12be838a8153 (commit)
       via  d60907a85ba3f762b81189588d1b7317b95e0521 (commit)
       via  b88b05b2a779554a0e3c345933104d42046fffaa (commit)
       via  71de39fb8126b7200b2f6dcd9689a000c958fe0e (commit)
       via  f337180ad87778e3b91111efe93c3e31b1c92a91 (commit)
       via  489a53541118413b38865c8a3cf84b24b8b7dfe2 (commit)
       via  63f04832f2604868133a23d110ce6df5a9707993 (commit)
       via  de07e6a0ab66de4d3c7720dc93bc7d9198c9d26b (commit)
       via  4ca71b858671d112fade23b449f2a59f14d1d300 (commit)
       via  01c6801b65e167ba2cf635143b988bf4bcbbdc68 (commit)
       via  31d5a4f66b18cca838ca1182b9f13034066427a7 (commit)
       via  0f7a43ef24e2fedfa554200cbfa3d83971dbfd90 (commit)
       via  9f854755d1bad72bc4bd94accbc60d211c880cb7 (commit)
       via  0a3592efda9bd97cf251163cf9a30f38122cb7c2 (commit)
       via  1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2 (commit)
       via  2139076757c1a14ecce96eafd1388f978732f8aa (commit)
       via  ab47b771999bd12171e65a8a3fb2ee512b709c4b (commit)
       via  ebe4e57805eda25ca347e0a9db8adad11fb3d4b5 (commit)
       via  d85912df5ef89ff95c3653403503f61d120a0761 (commit)
       via  0f76bcddad8050baf811b0eaa5a117cc61dcbba1 (commit)
       via  f01fb1d89b20b23c0a680b1a97dc83e5a174e2e6 (commit)
       via  d2e805bb39d06f0ed47c49879909f35b5d341530 (commit)
       via  9862bdf184aceb37cfdbb4fbb455209bdf88a0f4 (commit)
       via  92794c72752a77005c2f9c7683fd2c65d7d802e9 (commit)
       via  046729c74341bb2ed1e6f60f81470cf6a6883000 (commit)
       via  36db2f897ac139ca9b71ccee07a7b1ba1e3aee7b (commit)
       via  e6a596fe8f57103c735d8e135f855d46c248844c (commit)
       via  f8cea54b5bb8f870a01beebbdcde5eb90dd7d8b4 (commit)
       via  3000256b60ee6a2c19a7188be4d17eca833ce869 (commit)
       via  137a61f2afcd6d16ea20c3a4436046d783a5babf (commit)
       via  edf044e9e2f1572b618ec2438cea1cad46432276 (commit)
       via  6b75c128bcdcefd85c18ccb6def59e9acedd4437 (commit)
       via  1a5bd80bbe01abbb2a5932bc43fab8e7a287dcf5 (commit)
       via  c03e6df1521a378fa3cb9eab4a11db93e6e34969 (commit)
       via  573abf93bec24753aebb5a6c70d8f50def521879 (commit)
       via  d287d9c92ecfb59d2c9f525cf79c7bb5167984f6 (commit)
       via  50e96053742a30584f91a6bdb4b788977cd166bf (commit)
       via  06d6be693064252ed2535fc8685ca4e7b8db0989 (commit)
       via  8cea64b69af8d5ef21497d2f1c9812968ce5d8f7 (commit)
       via  f1e08d75cabc45454a9bde86158dc8c7348d7f9d (commit)
       via  cc48074a9fec60ef9ba69991549f9e167e620225 (commit)
       via  7a5903389ed505f6c7ca4c87adf705216d11d1af (commit)
       via  8e8607c6faa34d9493a831054ecb64281f1f06c7 (commit)
       via  d99d546be040419fd49ad3be179eb2206f5023de (commit)
       via  4ab7d17edc10ce4f7b834709aa009aba4db9d877 (commit)
       via  df02b63fe1176c572a7eee996921f211ca970953 (commit)
       via  f8a64959bc5f3ddf68ba4d01bee092bf4f1f9558 (commit)
       via  7e96227163334ecd54e506bd2cedb58d3f6cf91d (commit)
       via  ca42fb6438b70ef569d00dc07b1bb23c0f6124f2 (commit)
       via  bcb37a2f6b11128620bb34a0c2d3dbf7334c0ab7 (commit)
       via  d17ae6dc7160a471abdd05f22aacc359df54b4e4 (commit)
       via  d9319841c509648c1ac18fec2c3d2b2c08313eb9 (commit)
       via  1aa233fab1d74dc776899df61181806679d14013 (commit)
       via  6d5f34008d7e793546fd990cad11e40268c0ff04 (commit)
       via  45bd390859827c02965765b4b146b5351cbbb1c1 (commit)
       via  0f6b216a89583edd445942386df5a388b39149d5 (commit)
       via  ac552055bc8a4d996a0c24eb5f13d01667a3d77a (commit)
       via  26aaecc388f8c152b5d63a1f3906ba5a625b0e31 (commit)
       via  10c84106e8b34d78fa1916e4bc3db15030fd94f9 (commit)
       via  23cfc5b4d9b384172d0eadd2269ed6a6121966a8 (commit)
       via  8d7ef6fe3b696ee2cffdc4f10fdf673968933077 (commit)
       via  6cd1c3aa7fb998fe9f873045b74185f793177cb5 (commit)
       via  e6d7624e503084067e6c4659c6bdbd89c038fdd7 (commit)
       via  4b56e1807d8ce8b86da6793b67b50ff57ee62b9e (commit)
       via  5c16ff47ae8d485da0684ee7dd5547eeef3c6232 (commit)
       via  65d8475336b8e884ff261b9a1fe03688e1618cf4 (commit)
       via  388e77cae5d9260bcc314465f6711bcdd782a26d (commit)
       via  96c94d6baf0a68b641cc9b93966b09b38ebaa15b (commit)
       via  1db4e8af5cf9a8600e8005807f0aa5109756c064 (commit)
       via  4aa0057db95051e8e554bb5fcbcfbfecf822a5cd (commit)
       via  89b3af8226cb89bcc59ceff5e9547dbfc5b30665 (commit)
       via  d0a7cf4a98daf0ec8759640a91a12059cece6c6d (commit)
       via  5dc6be6febd523e202771cd11624efc29854349c (commit)
       via  f230c7d18b68d5c03131089a4f5c9739af7f9d83 (commit)
       via  e1682a42d23d36a3647878e13681dcd659622818 (commit)
       via  e45fa27d90ab3ea7b1081ca7d9513f63f5083b8d (commit)
       via  1e9bc2c16ef78f35ec35e340c696b4bdc10b47b2 (commit)
       via  85a2ce538c6f939ca539347676e5587228a29895 (commit)
       via  d1773b2ef6f98c26493ae76783158fc2ae6fbe52 (commit)
       via  2f51afcbc57c6d58e7d90f37962f3b93bc768e1b (commit)
       via  0b9c1b299f2078ab1a7bf08759a463eb179f0365 (commit)
       via  918c35143eb61d6e0ac96e98f2a95b12d55fdc0c (commit)
       via  480da1fe075da66aa8a144d37c23bac2fcfa1e2c (commit)
       via  007d31f50876cd58a031dd86b461145e77bea63e (commit)
       via  81b1ba0e9cf67bc5e8ee6040b28436d4c64b72cc (commit)
       via  27b7f9d36113514773777eb94bf66a3ef8c49a82 (commit)
       via  fc17063223655ab14b4db33bd63dd33fdc5ed5ac (commit)
       via  6716721a7c10737d86a4a29af530d54a458f83ca (commit)
       via  e8aa8b8b994146dfff6d29435a66c88dcf79eb69 (commit)
       via  61feac8366f972b60410b925e36a9267338b3e9a (commit)
       via  586c93cef97215330b8bdffed6c35335fb66173d (commit)
       via  36dc8dd6f15a42f401ffa32829ed7c436e529eb3 (commit)
       via  5d6c71aeb2575883488b2cde87501aa84260b1ab (commit)
       via  233d2d783e6be97f208998f9fcf79404eea5c9b3 (commit)
       via  2085b2255a79c0e5a04fe457bbb228d2fa24953b (commit)
       via  2d20ee347d82f840328c2bddd014cdf232962843 (commit)
       via  1ff0be2456cfaf9279970ae9a30a48d6267b96cf (commit)
       via  80447181a64656b97afa9ab71440907017e873f4 (commit)
       via  3878aa43817deaee33b21956d3066baef77a24ce (commit)
       via  cb1c34cd2ffb876819441b4869a66a4cb500a8ba (commit)
       via  dee6a4739aee15e8899da2e35d179cb1d8623e76 (commit)
       via  50672f2d6073e813fb80250398b6e6a2b93c915d (commit)
       via  1a90f118bf69d6239ca290f712bfeb89a9027efd (commit)
       via  5d290088a1b996011217cf801e37600d5bcd037e (commit)
       via  3d59d6a24e3a84c3ca453721649e6adfab863c0e (commit)
       via  a95b528af25a2b3bda91f9b88c04a20b0b783208 (commit)
       via  58e8ca7d1c5d8f4b69aa174405e4ef280b8012cc (commit)
       via  aa13f832395794bab3647ed375ac8a6e2d26e55f (commit)
       via  f2ffe07f7e25c037855685b7693ea4d4eed1cd0c (commit)
       via  0ea04c4bb216cc822be49626d4b0269956fd070e (commit)
       via  b03d29677700c1dd2a527dafe9987defb7556e97 (commit)
       via  043ff1e7ec5f2c8e3d6b7e278418fc03eea2b09f (commit)
       via  01b4b95b5fb7aa99765f29ffc61f5131173148eb (commit)
       via  c5117dc4d2fd89f1a66849713c6a3cd51735699f (commit)
       via  5d7004d0ac4fe553a61fd2eb99a8af3eb7324956 (commit)
       via  fc0fe98a085ece85e143188c5647740f95d347bc (commit)
       via  456933355bf3bc2db5a6c52ba4dc6d8e826ce6e1 (commit)
       via  9697c6b3cc3e49d96efc6777c1dba5ecb00eb785 (commit)
       via  67a11e710e06647dfb65ea6e592fd80851422dad (commit)
       via  b4b9c3e18f8d76b695d7b84f1b128ccba229d814 (commit)
       via  bb76c3f643eb85fc8b1ed8087f72368ad1d23aa3 (commit)
       via  2764ae7bde7b314773b7258d23fce3813c4407b2 (commit)
       via  1d9614bc52634bd512121f34af66290a2cdb2958 (commit)
       via  34092bce6cb5755eb6b53979f8f624ca78b592fb (commit)
       via  35ca4f5aa94daa5e3a8ddcb02812e7d76685e65e (commit)
       via  6d46a3787127f87aa65c9dfb626476f79b4f0194 (commit)
       via  c692292fb26bf6af6e94b7e160c0c7af27e123ac (commit)
       via  d6a9dffdd4ee8af94e31ae9462e2ef851b49fca8 (commit)
       via  bfae9c1e78bcc1e94b4d5eef4d0bb9da1d42f30e (commit)
       via  09e4d880b9e7260caf6b5ec763aa1e0712531657 (commit)
       via  33a0d78c8ff1bd0083251fdad2def37c6c9064dc (commit)
       via  a28f94240549b3b869e6aef5265d46afbd09f6aa (commit)
       via  b843d05fdaefa92abcec50a781dbdfbadb4c9bed (commit)
       via  0428f6fcc7b5acc73f70913a17bd6f23c5a6ad3a (commit)
       via  9b9a92fc3d9cd1e37166f04284a922f9ab220bbe (commit)
       via  a3a4e317a91c075f0d16de7d16cc652e508df101 (commit)
       via  bd938be1cafae39233d0a8357a4e10b383f7de37 (commit)
       via  e7d5e8f78ebad76b695e48fc2780babba6ec07d5 (commit)
       via  0166b44b81851c687d85e4f3fd87ffb0e92c6d58 (commit)
       via  96086ea69576acae7d59e1d7665f622bd526c7c1 (commit)
       via  7c229ebaca82e06899126f9b364fe524ec6d4b56 (commit)
       via  b7e1847c3a1979d3ac593de435e142335cbc7188 (commit)
       via  b3af32e148d004ef5010d37eddccf6df57bdb257 (commit)
       via  2104208cfcc7ab912cf2d530697c7192608f3c5d (commit)
       via  7e1e5f38f1d28c8e19337fb56f3dacba81341ec8 (commit)
       via  8635b169171d0d88ce19f46039ded6e1dab7b72c (commit)
       via  05d4deb643271e0f0b0dcfb22809714086d50788 (commit)
       via  1c8dfb0cdb80841bea487ee355ce85c749223810 (commit)
       via  c5f69488232bd0464cd7e2174be96b30b51b7e83 (commit)
       via  6b600cb1816705b04470ba2d0aca64dfdf8f55d2 (commit)
       via  a3fd03e16b71ae4e9b480e4e48c7ddfa393555ac (commit)
       via  64d4ac8b0fee6702093428b855f3d878d7306468 (commit)
       via  5038c63b05eaee1bda68346899ac3f6baf5fbe56 (commit)
       via  0613c0e0ebfcc8e3cf37678bd6799889569beb83 (commit)
       via  5166d1a65421c3e8515dbcb0d5fcb44c7f400035 (commit)
       via  66bb38a4d0cf296f48181d624d22b1074688de38 (commit)
       via  7d2826b519f95b2fecd299e15952e897c5a60b2b (commit)
       via  e9f0637479f992936b2feab96e50a84a6a4dfebd (commit)
       via  c3b01cc59ba03c6054af4bae42e08965b3f60eb0 (commit)
       via  687b0e5483e088ca07d5f7249b109cc377d04bd2 (commit)
       via  409e800ffc208240ec70eb63bc2e56aadfbb21e1 (commit)
       via  6e4e3ac19c322c65679c6c5653cc41b80305d9b9 (commit)
       via  f80ab7879cc29f875c40dde6b44e3796ac98d6da (commit)
       via  00a99483151a21e73ef432dcba73347e1fd407f2 (commit)
       via  c383ebc71434baa5cb314b3de3f3e18f39ebd0c7 (commit)
       via  d5ade3d32087884e477d8f5b2fa200324b96ea0a (commit)
       via  0e776c32330aee466073771600390ce74b959b38 (commit)
       via  723a57edeb33afe206a8e350cfc583d5cb451051 (commit)
       via  25c802dd1c30580b94345e83eeb6a168ab329a33 (commit)
       via  76bbf34210a5cf70853337a9a9f064c07c7aca76 (commit)
       via  d27f4125c99d13a7a73dee8c196a0d95050a4b62 (commit)
       via  081271155ea18a33a135711a983e8882a2f56eea (commit)
       via  e41f8459ca5dbc886e838e6e32585ba5c7eb96e6 (commit)
       via  e856c49ae33b2b79d8eab0b313e4ba25db261c4a (commit)
       via  3a6d50835b621e4825ec0d8434ce066bd31020d0 (commit)
       via  2182b30eb6b833fe4c902d635aa97ad901518904 (commit)
       via  9aaf85731baa1ea5fe9484efc9bf48b264f60d1e (commit)
       via  6d2960ff386a85c9738fc4cfd3975ee1d58eaa04 (commit)
       via  3a25578a01620918cd722e430b61c0fe91177e0a (commit)
       via  dc491833cf75ac1481ba1475795b0f266545013d (commit)
       via  8f876a23792b3feeedb807a66a08cd4f62d60d8a (commit)
       via  6cfcb5a3c784f774702d9ca183e13f6b6690b74d (commit)
       via  d5ec22cc344998038cf68b6fdf309ad2a12b2b5e (commit)
       via  701ffebae5b357a693e764bbef904dc374ebb591 (commit)
       via  e16e284794d66212aec735ece0ee1fc112f2d2db (commit)
       via  2024a1554624868e5f0a4d09b75c3ddf39dd342d (commit)
       via  10b6bc17b7c264f41dcdba64fc3a79904c06164a (commit)
       via  a48e7fa14f2ef90bce27ff3e7aa4a93165e08d37 (commit)
       via  62809f71c563128cb3cc467d867c621c61dbb926 (commit)
       via  d07206bb5c5ec4b3411e759a9afc75c2c593a4fa (commit)
       via  8fc9df7f444af31a936e1f261f7560b1e222a3ef (commit)
       via  254eb201171f450826e2c907098f0c78a7e3c7f4 (commit)
       via  d38014229e33d2bdb3875e53b9486d54b3920ecc (commit)
       via  17565e10ce667cfd7048d4867795ba3cb6876f2e (commit)
       via  1cdc35417c6f25f254b7053e801e8415eeba9d84 (commit)
       via  0ec187bc1e3cdde29b20f2465c4d5417e04e2d6f (commit)
       via  ce39dd192fc8ba15479fda1a9da08deb8c3d2225 (commit)
       via  eb35651d68eba80cbe7a5bc23e72d3544719a33a (commit)
       via  bef6ceb2905d328c712a45754be23393d56b2a31 (commit)
       via  08d090b4685220d3f286e1506e1a3c884146122f (commit)
       via  7b667cbd3bd3baeaceb60b987ab9770684ff5038 (commit)
       via  e5c133124da1b724f0f452f63fa947fa036c24d3 (commit)
       via  1aedd1b56bd3764739d247dda7477bb799a37ac6 (commit)
       via  cd3588c9020d0310f949bfd053c4d3a4bd84ef88 (commit)
       via  40e0797d688e26dae0e93553f63328aa649e9681 (commit)
       via  1107b46ec39da9cdac19af44ba79ae5ee8722025 (commit)
       via  b561ddc9d182cc237143fbd37ab9e6a0277da948 (commit)
       via  af0b62cf1161739d3a1244750b60d3e6b75a22e8 (commit)
       via  b64ab304aa90d938003922c95926ef1b0ea4fec9 (commit)
       via  4e0d6d115cd572e58b886bcaffee3f1df7b6bcad (commit)
       via  4493013b75994f8689a26951592fb575a23e5b35 (commit)
       via  8df7345ad6d658c6a366499b6e491790289168ed (commit)
       via  f0ad44ee4a8bc33ea2109d91243d95db1833659a (commit)
       via  3f070803d6d61ffbbda0f6628bb2d7f0cfdb6ca0 (commit)
       via  c9160954fd701796f52c329e5ec3ca2ba6f5995c (commit)
       via  25b432b279b90ca97dd4a69dc1d4f5428fe2660f (commit)
       via  dd63399d282dc503e4009bb579ddc4ca15ccde5f (commit)
       via  af2a4d06dedf27a1c86cd7ada5e85df495a79ff6 (commit)
       via  f8c76dbe976b3134974a3b3e28ae9c7586439c3a (commit)
       via  c6df34ee69d5f4db86abcd4710c359f62f78e8ef (commit)
       via  56aa312f698ef597a9d819e5fa28e2b75a9f321b (commit)
       via  1cdc605c50c999ffc1225bee5817aa0ae26bcc4d (commit)
       via  8b5b28cdbd7be0c7a79950b52679ac4be3db274b (commit)
       via  b16e9d26953cd7117d14ea8dde9e739cb34cb878 (commit)
       via  df9b10fae5385c1c0f1cacb2894eee347abe1f09 (commit)
       via  ab48bd8228405d50b149c502d7f73b5eb1a57608 (commit)
       via  607cbae949553adac7e2a684fa25bda804658f61 (commit)
       via  b5040b229739c8c69463fe462aa8f7b4a8e47f7f (commit)
       via  357106fc545e6d506c4ec757d306a955c68d1d5f (commit)
       via  8b52836ccac5c331b30812c608d52aa7fc538de5 (commit)
       via  410464e7b0f37c8ba149e543c789a598914fc7d7 (commit)
       via  e715842e4d36c12fb17a8ee3d0a41218ff86ad7c (commit)
       via  20b2131e3753a2ac7e78597a95bf244748b7dd3c (commit)
       via  0f988d9f9fc26ec5dd3ee1e298ac544af3da2fd3 (commit)
       via  ecf9f48f4b4c3beaf97ae0e83c11f4547f024734 (commit)
       via  4d39e13d7f5ae5c30277f602f669f0421e2bf05c (commit)
       via  3bf84d5c678f9f86df6382cf30e694404e2f77cb (commit)
       via  12a6217c59bf48ead2e11aaaedb774af7a618701 (commit)
       via  cb57c9fcaa897752dd7599dcc15d647fb880285f (commit)
       via  1294219279910a89d4a99e6292cea8e13a4c301e (commit)
       via  61dd61b8f259b0938646fa2539fe928608a0fbad (commit)
       via  f1306bd835659173f3fffcfbe1bf8971dc62efd9 (commit)
       via  7cc8a7d6c32472021b34e43d43a288cfa263f007 (commit)
       via  efa6b47c19bc9f992f1c5c0196e07a01d030ecce (commit)
       via  0a7bd2339e604fb26b7bd94bd8c548b188d60adc (commit)
       via  a72886e643864bb6f86ab47b115a55e0c7f7fcad (commit)
       via  6442c07428bf7b8abeb73c4b6a7729ecd4b853c5 (commit)
       via  ed04555e46292f9d573372b07000384b6f0118af (commit)
       via  4f6c6441787be0a145917ae8935b70bb89f27b7a (commit)
       via  e13d28918a391060d9c1f286d19308cb10975cd9 (commit)
       via  5b7e0424c3d826d5c7a9a247d63c7d716b08e470 (commit)
       via  46adf014f18c6b3f9a685b8f0fdd0775a583a7c5 (commit)
       via  1e9bb55e135af5a0d8dc353a2ffde7c5b247f92a (commit)
       via  738b11db9f13c00f5a9ddfb3ab9996fbf85c42d8 (commit)
       via  9b76badecd4b688c95c5f83ecdc168922e48680b (commit)
       via  07520bd77da400ca476432f8bedcd934d992ec81 (commit)
       via  2ab68057dceb0d471daf4524ba338f8f45e942f2 (commit)
       via  11981fea517310f13154bf7695f0278b571ac28a (commit)
       via  092dbe3f2af0a0c010a081f64947dbedb11b3468 (commit)
       via  1fc79b932eaa88be33c224e4eea3fc58907e98bd (commit)
       via  8d36a0115d1b3051b88c9f9687103fa2427e749c (commit)
       via  65bd895a45fd28c43f748f07aad5fb9321fa6a0a (commit)
       via  bfab5a33ceabe3f0d31bd465d13308c8b84adf68 (commit)
       via  ef51c8418dc44bf2882c898990b30fc76ca9a97b (commit)
       via  ab642e89554bedf0a66c2358db71ec16ddeb2e7f (commit)
       via  91c2cf35e41642a997df020de797324bb4cfedcc (commit)
       via  c6e8dd84e81f5686d45cc41f514d4f61d075a276 (commit)
       via  94282a20ab88b590e8b3b2522da2873ca81c632a (commit)
       via  4ddb345fdc31614e191b0a0dea282ddd2ebe279e (commit)
       via  18b04945884fbcc1783e52bed845c34395c0acf8 (commit)
       via  7d25b201c0bc91987c4d9743d0c21b9486b98fd8 (commit)
       via  a1e64504a4d039b4c7f7434451f169c475a1a35a (commit)
       via  9e6570256e27c28b20a17fc34de5689ee4685091 (commit)
       via  a0e6002f56e624a7cbb48fb06d4ddbc612e315bd (commit)
       via  b01c18148a840b0d5719cbcd2653bf1b346e45f9 (commit)
       via  3db6583d93c42b3cb01ac5619d59d19645bd60bf (commit)
       via  1d43b46ab58077daaaf5cae3c6aa3e0eb76eb5d8 (commit)
       via  41f528a9eacdb430406a0d9047049585cae31db8 (commit)
       via  0fed56c3692e358184958cc1263cff67db0f62cb (commit)
       via  1173960107363c04608726b57218a54d2b3b3d56 (commit)
       via  e76affc220a5f62b24e34152afdda62328a327ec (commit)
       via  d15cad92c958a6380c90ba76a2ea968e1d8304dc (commit)
       via  e098bcfbef9b8a66c3330bd37c6bbd8d72a1399e (commit)
       via  784f0a8916465d6ec9c47db9f7f3af0fbd564bed (commit)
       via  5cb4d41cf68ac18fb5a5db68046e3d06b6552e20 (commit)
       via  b5d072cfe24be6ad1636dfdb50405ff32473a413 (commit)
       via  6060fcf2a39711ba5d842a311ea03a47054f2ffc (commit)
       via  00ff3b7b99fd40c267b91fcc2d8d8396e6209873 (commit)
       via  975c64367afd77288b193ae3beb5b95688deab3b (commit)
       via  cf1ce254c246be39069e7e7277e1c506e1b239a0 (commit)
       via  c75108b70a9d560034949a75dc52ecfb59fa0b3f (commit)
       via  6266a0dd4e0537335e22c2941940636fe220c202 (commit)
       via  a7d0518a8c66ebc0eb471eccd67054d27caa07a3 (commit)
       via  b93bdb9b324b7dc56bd12b5c781e20275bfc3310 (commit)
       via  351ce9ee1612362800453a280dabc012565493c6 (commit)
       via  14f9cfa80194d2d391ea6657ad0205e6223e2d25 (commit)
       via  44147cd660a85ea909f54e496ae3c8ad1ed583fa (commit)
       via  5e3d007b0b08f340e646a2df9073b31cd3c76476 (commit)
       via  c3a5acc65768a1d87c102159baae0d04f8c14790 (commit)
       via  1c4e66cfdfab4fb4608f2b8d18a25e28e7a70adc (commit)
       via  7db8a3e327aa6eb8fdc5fed2abb7f52b030fe6f8 (commit)
       via  fd3c952098c46d84c9a277b1409442813a263876 (commit)
       via  b108bc9f9231872d4f3e0fa768b8c0e4506a2b95 (commit)
       via  c5cef09ac250129340f357a9ea2dd798d290be4d (commit)
       via  8b349f6730bf85ccfb37d368aa18db4f6c0aaa1b (commit)
       via  4b584e952e14a40e81b7e360c75cd787ba988481 (commit)
       via  702e2dd653a315141e01147ac4cc2a6c06fab673 (commit)
       via  ea78ae80aa517556f7c5ac722f324baaf422f08e (commit)
       via  5d38929255f7d8cca95020672a2b72273a07de1d (commit)
       via  ba1568e6482268cea9dbf7f980a17423133c65eb (commit)
       via  bad7607f03104c81cf7224f6fd71db009219ad51 (commit)
       via  56d5c4a16e39b3aa6c1786e1ceebb8550c0429e3 (commit)
       via  96e22f4284307b1d5f15e03837559711bb4f580c (commit)
       via  b1380ef8f0534540970ee93a24f955db89891e05 (commit)
       via  673ef8efd5d474d66d62d134348730518160cbf9 (commit)
       via  599ec7f889bba386c838ec85735b203514905d9d (commit)
       via  44160936a4c52ebaf4be6e1f0fcc02c84c7fb719 (commit)
       via  db063ad7e102eafe75bda392197e9653be95bea4 (commit)
       via  f7c85718e562f5cbbd6eafeb2549a21f358afba8 (commit)
       via  e23b6b271c892905c9a14386aee502610502bba4 (commit)
       via  c3385a5449721914b56448705cae8af449e6d337 (commit)
       via  b10e71aafd6c8b4227083d8e1c87da8878198816 (commit)
       via  25e2cd129a9f7b5a944692152e173dc2896825fb (commit)
       via  d69588a14a8886c02a1510820d69f319171b68c7 (commit)
       via  f08602b2e2a311b9662e65c36c56f9f38461c194 (commit)
       via  e7a16b2735b09c0d5b55375e3091fa886940fc40 (commit)
       via  6e68b97b050e40e073f736d84b62b3e193dd870a (commit)
       via  78252609c39a14fb24a879c74108705c7cffed49 (commit)
       via  ad134a3c515577b5fbff5d05733bdf8d4265fb45 (commit)
       via  8da9b5298d5cbd0df840240e71460d047f4da808 (commit)
       via  18e970e16c5044da8b4a7d2c800f0b7baeab9f96 (commit)
       via  0b145510ca7b6d4cfe8bc43cd6de2563907dfca3 (commit)
       via  72f4baca540cc17e18da4632cb4d32df29f3a9a3 (commit)
       via  86123d1dc31432d176eb54fa300eb65e269df0f4 (commit)
       via  7e874ac36e4086fc0ff9b50537ffdbaeb685ed09 (commit)
       via  f0f4387faa4f6246546ee4b79e6289dd370913d1 (commit)
       via  13c03c7116df55fa0aad790c2b2a88f3743ba95b (commit)
       via  65b9917a960e8b49a947bed1886d1331155b95f5 (commit)
       via  5d4e05531e443e355fbf8369a37efc239d1c95c4 (commit)
       via  c92981134284041b71efc68cff49fead91368e47 (commit)
       via  60c6d07decbe759bb57da7dfafc79e71c52a9c6c (commit)
       via  5634285ef8bed69dcceab61e84b7aefdf1c1ef5d (commit)
       via  e0c15795fa09d93fa8c6e3aa0722ca9ed01b61a0 (commit)
       via  27f88f2ed0a0a7541f3ea9c6d95db5c805e4b062 (commit)
       via  1adb9636b2ba1314140411cd142f9b2f95afede9 (commit)
       via  439b8e22a099e641bbe9236bc44beed78634568d (commit)
       via  3c5fd61a4a294c75b6e90dd5a78aec980c387432 (commit)
       via  4e4e22281cd1c9dbec42d7c0a2842c92a69b1e8e (commit)
       via  d3fbd47b4323cbd12fdf3c07af74a6dd7514492a (commit)
       via  6e65029d717a44a8250ca6d7e9b57c2927ea6d11 (commit)
       via  46e8133ce6aced930a85be2536b5cf1e493e9ab2 (commit)
       via  431973bb16c68442614aac015f38e44ed39e960a (commit)
       via  4cf570ad0a107cbf70a6e96e8db30eb2c8b8a2ff (commit)
       via  3872fd58983155a69817da3744db56ab665e9707 (commit)
       via  a670d589923976af730c4387c7c4707552efad0d (commit)
       via  edffc4851f7373294b6486a5d6171f406f7e1de6 (commit)
       via  299473702fedd1cab6967683ad7172b88c35f353 (commit)
       via  1814e37c1b61225bdb05ea81c3fcc6ea3320e8d9 (commit)
       via  b3daacd1da0cefe8adfef38f8d324db55a7f2cc1 (commit)
       via  499668edac6fd5929dc43391e3281046d0512388 (commit)
       via  c30cda06f4b0ce0dfeb99badab779b72175000f9 (commit)
       via  0a6bcde01f641bf024e9179d2e753d7d2ef4df41 (commit)
       via  587102b55b9cee15faf4fd9d9c4c3ad81c88666d (commit)
       via  3a86448c1c0a34fd3d27b3395167a63d5e59e733 (commit)
       via  03265efc1c833609c21c4e6d2daf8227d20f68b9 (commit)
       via  b341cd21229db04e564fc8da0398e3ddfe883e46 (commit)
       via  aa35477883e1a5b1740092296d463ecfd277dbbb (commit)
       via  701074ebbf30930b92f8b06d9cc88428aed8db5f (commit)
       via  e009438536339927f645f9a272c23e43cd0660fc (commit)
       via  7714dea5be87f0708ab858bf369390128d2cd2be (commit)
       via  6d197660434d715c4e57c6acf792ffdb0528c147 (commit)
       via  c3bde52d5e1e2748f9d60aa8740fa044260913d5 (commit)
       via  d8ac168592885baab953cbe6e416afc6b72d9e7d (commit)
       via  6d8da5a713e33008a9e8bac1ba24367a3ba86a10 (commit)
       via  d63457baaa31c80bb1ffeefd4f111c7d92685c8c (commit)
       via  dcd6d7ff4c0671a0995fe4051cea0e525d3f82bc (commit)
       via  7f150769d5e3485cd801f0b5ab9b1d3b25aae520 (commit)
       via  6ecf994e0d3580064d6c8a490dfba1a02e9930ee (commit)
       via  61fdce086a40930595e70168340ee68080b327bf (commit)
       via  1b328591b9bd5f366bc6e205aad0cde28e447442 (commit)
       via  61488d93393fff47ea8cce1c2b41ac004802caaf (commit)
       via  0a54d27ad889cc8931bc5a0b6549325c4fb3e45f (commit)
       via  6a54cfb961dae9f44120ae2da4bd4c3693f9ea49 (commit)
       via  f17fad51f1533f89600fb3c2e265ee2ad79c3f53 (commit)
       via  44113e516b30bb58dd7481b2b87a7f88c0ec51a7 (commit)
       via  81c031de6abed68c9fb4a89b2a71474f36488b9b (commit)
       via  ae199c79e42520e8e521668c6f9435796ca92aa3 (commit)
       via  7d4cc051f1ab3470bb5f7b5f8ea9e622fc7c7c9b (commit)
       via  bbfee9cc079856d3b437a1bbb69b4157092cbf97 (commit)
       via  797d30d14f37c6d3fdce9c1140ffebd91021bfb6 (commit)
       via  6bdd521c49d10867b63158837f6fdc6d06d1f256 (commit)
       via  56d8610a9e10792048a10cce86196deee928e203 (commit)
       via  9a4db0085e43df8d8facd885eb9c9a0b52280090 (commit)
       via  541ea699442bece13605d34e182ff89bca384a43 (commit)
       via  c88718438ee67b52cfea003b9e3ce1e5fe234bd8 (commit)
       via  dd7fb442ed97cc469db4275fdc3d4628cd44ea79 (commit)
       via  032f9633f4a353c11d0d855984aad0f0392a6ac1 (commit)
       via  f546d730bb772a8a4b9ea1737ed63d888755673a (commit)
       via  9895253cc3a1cb1431a04a4b6d8e9dc82a3e3bc8 (commit)
       via  7e8b9cdec8bb9e79069a4534d896a9a4061c3b88 (commit)
       via  a0cf3955fceb4d810997dfefed7abbf57e4ee1cf (commit)
       via  1032195dcf567dcdd1b500ebf177c415ff9aeba3 (commit)
       via  ad24f1d258542fecf0e83187f6ed5cc8cdce8ca2 (commit)
       via  9ebdb058e61cae1dda642fcb00ced7b7554b44fb (commit)
       via  b88c94b7119650217408d800847dfbbbcea7306c (commit)
       via  419b9d48771946a1b0b75b7412cd2da3e7f81a5a (commit)
       via  1f311bbc22d17a747ad394ffd00cb130f2999ede (commit)
       via  fb49d74df87f9e87a7d14b16a3a84b31976a92fe (commit)
       via  1817f1e8fd4a3635b8b5e0d581f6a2aa61b5e955 (commit)
       via  68ee3818bcbecebf3e6789e81ea79d551a4ff3e8 (commit)
       via  ca1d0935b9d65aa1f26dbe4f0cfc0c4db7701900 (commit)
       via  1561a91d494d02fbddbac1023b2c84367cb1887d (commit)
       via  6215c5929bdd6fbb708fd0a2ee034250aa5cc065 (commit)
       via  1a4da963a4b1378332c74feb96ff8556d11bbbc8 (commit)
       via  acab4a018b927cf5887b6de8135dbae0d2dcdbff (commit)
       via  84ded89c2309b41f3d7656ffa0112021436ec4cd (commit)
       via  5bb1a0ebbed603d81656d5e87196191f1b00aad5 (commit)
       via  d83a117a090eaf417698eea6697ae750dc45c135 (commit)
       via  ea7f5ad5d326b7ed2d5f0ac1729c2301555b6417 (commit)
       via  68ac89fcb9de65cb1c649aa58b317be3fc793fb7 (commit)
       via  7f1dcc956a864b70e395d10ba095c0787db802a7 (commit)
       via  a3e7bf95ad016c9badd98c16614de4a9c168bad1 (commit)
       via  debb22346698f1be3bbbac4955fd6bd247aa41f4 (commit)
       via  c2d03d1688ae502c4e0b1eb23427ebae5307a091 (commit)
       via  3439230170effea0daec2a106a616965d4830968 (commit)
       via  ca54736634e25786f6d54317e97f3e4db71064f0 (commit)
       via  911b53ae021dbd04a6c12f69aa106fd2d868d54f (commit)
       via  1e465d5417011d24cb9aa9ffaf80a369b6511e2c (commit)
       via  c82f6195acb5a12e91d61956b8b958ceb0a0f821 (commit)
       via  b458fc09d6749b7435cd3c95952b9ab22322cb49 (commit)
       via  d059d370074b13b36db3ab685c307ba668faeda6 (commit)
       via  d8e223ad5439cdf9916e96178a4320403615b507 (commit)
       via  b8031ec74703c03eec1be362f0d3e321c4d8ebe5 (commit)
       via  2117c1db277b10f3bcc48b51d2ca0f821af79f2f (commit)
       via  e5d4874ace76b0caff412f2394a15a042492560b (commit)
       via  76335a521773c8118b7137d79e5f6397614f1904 (commit)
       via  292665a460ed22219490c742d52785b503002029 (commit)
       via  31cf6504b544e20f5ac84e3f74afcaff817c3693 (commit)
       via  0e6639a8432999f2880473b815d8fbeb335a6808 (commit)
       via  196b9474f5eeb11a8d96e52fed500270331dabc6 (commit)
       via  296a70859ceb0b168c3818a3869991e8b51c3932 (commit)
       via  f6f425b5e49110b76e9954dc71d152806503c0bf (commit)
       via  fa9b8636e68a97293c26f51f4ecf50a2753965e4 (commit)
       via  e438bc6f5d4da2cc953cb76b9a924077d11fe347 (commit)
       via  043963cf999791194e2db9e59fb5920ec30fc20f (commit)
       via  fd5db1f7aa6f49091b4e66193b0379679eede4c4 (commit)
       via  190d4380c54493561eda99d66739f31e515f8e4e (commit)
       via  2a08eafed9264b790ada134bdee7ee02c995c50e (commit)
       via  2d84595398e0a29bd042b848e986e8aa7bc40f75 (commit)
       via  021f3f24fcd053a92b6ee305b984f1a6a550cebc (commit)
       via  937b5a6f47d4506193de9a3ed77adcef2cb1fd35 (commit)
       via  c110fcc95f61b07871fd6fe7e24a495a3f49b89c (commit)
       via  58e72cb159391aa0c7832d08ddb0df361514918e (commit)
       via  26f4192ca701bafca9460e994c61715916091e37 (commit)
       via  86b3f90af4f0f643e44fd3f7cfd11d89a42e4ebd (commit)
       via  67ea6de047d4dbd63c25fe7f03f5d5cc2452ad7d (commit)
       via  ce532896000ddcc026045a08ddb9ae2b96ae7ba9 (commit)
       via  ddf232dc82203a777e0a59aa9b8252aaf5117548 (commit)
       via  41c8d6f1170f06e1da8908666444c88b08906f1e (commit)
       via  14909927e06d884129baf8baf7fd8760b2dea196 (commit)
       via  4a345eca2184ebccec3a17902056d03f5d00e540 (commit)
       via  6217a55056c1e2e6fa8d82357d86b218de43ded4 (commit)
       via  32012c8148dbf25fea0a490bd8453fcfb3854cbb (commit)
       via  01bc2a7ff47131144717e923108f71eda283475b (commit)
       via  cee641fd3d12341d6bfce5a6fbd913e3aebc1e8e (commit)
       via  ed787b2156b0a7a88ace941447783c53991a254c (commit)
       via  621c92d9a19379bb43e98c821183be1aa4d97c7b (commit)
       via  e9c6c3cf86e3b1b02c64bf567f0c20f6c1e2f589 (commit)
       via  004e1238d580d601f7fd8847ff1c4933de465942 (commit)
       via  5da6a0f0e8829140999c69bfb551a305c6bf0257 (commit)
       via  25ce3ceaf98ed34ad3a4ebe3cac901c0b6e15a97 (commit)
       via  5756a9c761748b960b974f422963fbf8e5498378 (commit)
       via  566e635f4f2647a82457acb9c047d890f4cb459b (commit)
       via  76f58b2ff1ebc572cef465f5be1445e08e4bf0fb (commit)
       via  f89dbf486bbbd41c3f4e85c15d2cda91706a37ba (commit)
       via  a91fbbe9905680873c4f0acf5cff1d712aa68831 (commit)
       via  86cab473cc0113b0f83755c14db4035bae675065 (commit)
       via  ca22c5ab2322ab7620e4b84589da6761fdaa3b62 (commit)
       via  838acbebc584fee662143c303b7a110563f4e0de (commit)
       via  c5d29c73bcd554111ada4dec49f61cfde497cb6c (commit)
       via  4ec7a8d9ab678f28abf3b37d40acdf159344cf0f (commit)
       via  92b2aa9c962e9ca1cec80f44bee713afa1ac53f3 (commit)
       via  6b9d28f7602143bb85fcfcefbaa35cde95fdbde3 (commit)
       via  223b19a30e4897c7281bb40c9f366a01c8f449ca (commit)
       via  bfea61834be28bc3c2413afb586971fc04056a41 (commit)
       via  620072324ac5f111f8fd40a4ba6d10879c44e211 (commit)
       via  522d27a63d1ff318173e7e4aeb6c1265aba93ca5 (commit)
       via  c4291199d0ebab1cdb49b80101239b9582c13148 (commit)
       via  9d9680719eb0ce32ea039386bfe767dfa41d1968 (commit)
       via  2f39435c981e3cb14d2c4e9551af93fbbfc28109 (commit)
       via  2276752655f67044fb6ae8f7e14e9ba5f6ee6638 (commit)
       via  54ba29f03a62c84ee9cbf1c92db74b57327a1868 (commit)
       via  6433a51cb6e72309eb027411ea4fa98adb97a7f8 (commit)
       via  4865dbd45b6f94b20b562b11224754313e74bf25 (commit)
       via  c7b8783766258a4321622b7d7e2bb02a647d0864 (commit)
       via  b8d8ea4cfb87fd12abe113cf63edbae4a342e6c1 (commit)
       via  6ce36056a14fad339ffc6528343aadf12065ca44 (commit)
       via  8dbb407ba4adc1bbaf061b5680bafd35c778cd90 (commit)
       via  f9ff938c75816df97f318a839f01be3f01c93f2a (commit)
       via  a69020025379d5430fff394465348aa430533458 (commit)
       via  af27ec87f09d82918b96c9dd6d236b4e39989f7f (commit)
       via  b47533e918cb5b0c2befe7b0da315819b009c47f (commit)
       via  f4c7155d41cb008a1a180e567e142ce096a21b88 (commit)
       via  d647a4589362d2b6efee86e58c9fb38e7084deb4 (commit)
       via  ad26a33ce7624ea2a9b8685f1255db0c1f80bfa8 (commit)
       via  5f9a52abbd9f785ab878e71907c8b6dfa587fab1 (commit)
       via  8b024ff407a6987a4c7e55f51b9d08c1c9a185d3 (commit)
       via  a6646243196e059b1c137c939787117e78523b57 (commit)
       via  9578ddf805881e30dc4ae2124bcae6b24b580f65 (commit)
       via  c3dafdb35cc8cd72e972a0d8212ddea3c09c330c (commit)
       via  a3e0ed25ca88b74d92e9bfa11414e137b5982de8 (commit)
       via  554a8b40e84b9f778f398b7ff15d86140255bc89 (commit)
       via  41040f22c80fd9f300f4f75cae3177a5360a80e2 (commit)
       via  163fa80307a1ab8882a1895ab1e2e12d22c8fd4b (commit)
       via  337198f6b8e619d836e9c51399be97e7a6038674 (commit)
       via  b0b09a77b7dab7b961f4424d05c135e9eb045b80 (commit)
       via  d1897d34676045b89edc09a767f8d0ab14d662c9 (commit)
       via  acb0565bb4ddaf1d51abc511459478e738dff6d7 (commit)
       via  99aa9fc05044158e0f41e56da538bd1162d869e2 (commit)
       via  e2c88f03e394ed8ebcfcff936ee888bf593e22d1 (commit)
       via  930d4317ed56bdb7cb0d7d53bac5db297ddd0551 (commit)
       via  1db00151f61cdd1c58bcd80dad38f3f97c67dae7 (commit)
       via  b684bcd2eb5a0def50c149319ab8df379155121f (commit)
       via  ada1705cfdea36539c48b1e7fc6a0c5cd7f3d8d5 (commit)
       via  fd39e4e890ce175901311b9e11291628743edde4 (commit)
       via  b9a2e86c2ee8d688a3e12877eb6dc4894a7eae24 (commit)
       via  ee7fff3113f67a7dddbf069e2a555e6dbac97f69 (commit)
       via  681e0e8b37fcf732b0f4caabae3695756e6a1e9f (commit)
       via  aadf8f9a5d8dfe13ae2196b61c406c8a8e1d05e7 (commit)
       via  c38fc5257ebbd0cd444e6b9cedb466b31df66cea (commit)
       via  ee8c0398005c5aff53be33b07d9d0b6fdd353175 (commit)
       via  45970604c644066bd34abd15c8877d5969462250 (commit)
       via  044381e03b7f178c7c322861960b79c8a27bb4b1 (commit)
       via  f2b5473fc2f2dfa13485fe9822e84fadd69ac950 (commit)
       via  0cfe2c51b17627d777cec9c33f9652b61f14427c (commit)
       via  1bbe7018238856b949b449bdacb43bcf90c79bef (commit)
       via  d7711ce221b04a035afa9d454c8baf53ec0cb9f0 (commit)
       via  625e9b719947e894ad7369d8ca61df23ea31b243 (commit)
       via  2878aadf0276c7a52832c7ca7f3bddf5e348d79c (commit)
       via  b9aefd1018b099666908d64650c8a5ea3e153ff4 (commit)
       via  8a40bdd1108f37caacd6bc5f367ecc1587ee53cc (commit)
       via  ad1161678c25ee35b1cb7d657d1aba411939efdd (commit)
       via  0b6ac7ed34c708e6e92c41dc28bc8589864cecd3 (commit)
       via  b0e43dae72cf709cb01627eb9e3095cc48989f4e (commit)
       via  c12ca1170ad094e0450efeb462328ec6b6ec7e1f (commit)
       via  a730ddd17c2a20dc55247b5a86d05e3d0bb740fd (commit)
       via  0c88eb0d723fa43865e185b201aba2685173f378 (commit)
       via  9ec6d23aa2ca58dd13a45821c92a926a0780591c (commit)
       via  4cfd27849ef6f2e99ee346763695fbc64742d783 (commit)
       via  46230c83bb8d70e170fe77e9e936765014b762d2 (commit)
       via  594dc98507783efcec6781077467885990094ec6 (commit)
       via  656f891efdbb6cda87d10a06f7c2ac883e17fb7b (commit)
       via  691a23f33ccde30a0d741b98bf0439228336af01 (commit)
       via  1c16d6d7fe6253041362ff994e7594805c297b89 (commit)
       via  8eb6232b0094778b4c195a870fa2c06cd1b7d0ab (commit)
       via  f6445b024942629726daeb591f99af090aa43c28 (commit)
       via  146934075349f94ee27f23bf9ff01711b94e369e (commit)
       via  b126cf8dbf225ff5b12c9a7b6a241d80babf8a42 (commit)
       via  911cb21ff76c1b887d8ce5e52a3059eaba9ec7e0 (commit)
       via  794cb37669e1a0566c6435e38e247ded643fa96c (commit)
       via  de9778c0c9db5a2e6ca3cfa384ae5a7b84120281 (commit)
       via  14c73702aebaea61c543730e4aec2608b842b5c6 (commit)
       via  625d818594e468ebd8bf89a6c09a97208b58071e (commit)
       via  81240b14097c5311ba5585f01f344b18b2048fcf (commit)
       via  4a4d7bbde30de5eec9cd7753e3f44c92c2c057d7 (commit)
       via  ff571bb13401ce21184923c973ee2cb67b85cade (commit)
       via  3a7f572d5a406e294373ba56b1a0357252fb30cd (commit)
       via  b235b396ae97ba25d59f5981da39f1d1e4c072e6 (commit)
       via  03753df452d1871af6f82824286cc07ab40493f9 (commit)
       via  c46aac2b5c86d037c7c3f34fbeb54d7ac0998817 (commit)
       via  4e99a42d3634690c74963ec9fc5c45ae21431775 (commit)
       via  2e74ee9f329249738ddf00599090f94ef80eecc1 (commit)
       via  6c2de9d212bbb3193304a1ae4fd54ca9137262ff (commit)
       via  a95e95882763d10fac105a93826d79d5ebe4a449 (commit)
       via  7d1e13b7fb6a589336cd83bef4f81fa077785beb (commit)
       via  bc1c9342a382378d6d659e3fcf87d6730ea71e81 (commit)
       via  38e4a2c44f8f73f81b56e54a7436bb9662b9851f (commit)
       via  af3b17472694f58b3d6a56d0baf64601b0f6a6a1 (commit)
       via  0e4960761e5bd30e5157e45711da1013d825764e (commit)
       via  f00e85fff2018483303ccc3dbf7d85b4052cae1c (commit)
       via  ab1f7bea793d2435080e5cc018f115169ddf07f0 (commit)
       via  c5753d1c96374bfdf2c8e9fc0773ac036082cfa5 (commit)
       via  ff329082790af7572016f64a90f62c7be87f593c (commit)
       via  32007ad7c992f395895eb8f27343003cf4f94a20 (commit)
       via  5d1dc735923493b057014df7fefa8c8d7b04349e (commit)
       via  5fb87e69f26c800823be33e81f99e1cb2143e067 (commit)
       via  49b9f8004299533dd7e54bde3820984d8b04f37b (commit)
       via  19722a540a20a2dc5370df4d8f1f0bb326175001 (commit)
       via  8724a537b4f7d9d93bf06c2df860ea83f247461b (commit)
       via  d903fe92287645e9701890b0953bd84529665776 (commit)
       via  ecb3b76489bf838fe32030517e3c8b23000d59bd (commit)
       via  df1298668ac3e758576b8b2bd6475c70cff7a57f (commit)
       via  8f6ca91d01a5155ace94f0c044e674e58f8e7898 (commit)
       via  f3f87eb305123de57135aaa96c12190f3bf1951b (commit)
       via  f5b0e46b8cc66dc5aeef4df4d4e938ab0f4cd3da (commit)
       via  0a149e0c7faf8fc0db56d4804acfb3df99dcebb4 (commit)
       via  5ca7b409bccc815cee58c804236504fda1c1c147 (commit)
       via  1e6ee8d85fb949a2e39eb0f130b6a2d4a6470958 (commit)
       via  a903d1aae9ab0ab3095144b9d2db7d5fc606b187 (commit)
       via  dcbc2db0a515208db5cbfc5a2ba88c14120ba1bb (commit)
       via  be1f9361884f15155c20fc8f8979d9ee32102713 (commit)
       via  4f423f99cb3b73d75a736c9610f3faf30cc3d837 (commit)
       via  982f6e4d7e7a2ffb0d17add0df1e5643aa38c092 (commit)
       via  98104aa8ac64b6602fa6c1c7c7eb08e9b43f0fa6 (commit)
       via  5220fc55e92504899d102b5704382382a4e450c1 (commit)
       via  21d7a1b1870466cd8b9f6203d509d9a9601e5c87 (commit)
       via  bb1400f97e377247cda132a14cdcb5dcc3f456e1 (commit)
       via  1d007555e13f0e148014b4582f6fbd8b6b7fd386 (commit)
       via  9163208c660f8ef8c4b1dbdae6c0c785c516bb1a (commit)
       via  f5c9c2f489e84de596aff390c498ec31fe44a5b0 (commit)
       via  56bd0746aef2a0b789402844e041a268b889b081 (commit)
       via  c4949d3d2b74f62824b670cf8d07cfe9e965a661 (commit)
       via  d76b95edce86919636ee0e458f0b9def08a9d2ea (commit)
       via  d4405856fd2e088fbc7cd4caa9b2e9a6c66e8e83 (commit)
       via  99fbf7cc5eacc8c0ec65a19a1eb83b4e0a329cd1 (commit)
       via  ff4a86af49e629a83e40f35c78c7616a208659c4 (commit)
       via  47ea557d9d8a9782e4d576c45c545913bbaac4ea (commit)
       via  006133b49eb5d44eeacb1d79593b97ae2212bbca (commit)
       via  261656de6d4fbe2f6979162c978023f8859d2289 (commit)
       via  419768f653e098ab566c9f94771552e2bfe3cc99 (commit)
       via  affa93f31bbc9719ac4e2ccc0e44d9a09c2b6a3b (commit)
       via  5f5d0ee169a33ed2edf6d80ed1c7f557b993eaf5 (commit)
       via  a1363e4b0e747cf0814e6f6575311aba9aac1ef2 (commit)
       via  8028d7abe44464aacaf7e8cb82a5a23d39fb4489 (commit)
       via  b6465a25eb8106081484d17a48c75031c14c50d2 (commit)
       via  a6222db2c3da815eb23c6deab6390066b0969670 (commit)
       via  6117a5805ef05672b9e05e961f7fa0f8aa5fca0e (commit)
       via  0fc138613824f16378ba2b5462886cb2d97a318a (commit)
       via  7e8fc92cb83d984188bd1556ead421bee39d9581 (commit)
       via  929daeade2b98ad56b156eadd22308bd6d7f265a (commit)
       via  64ba30803ae7a87f1c6bc21eb1a45c413fb6ce43 (commit)
       via  6588fc2759e5901f61327f170bb9ce0ec3d0bfcd (commit)
       via  4a843f9058e625685cf2338fe89c9a89af3c1777 (commit)
       via  1f77f60db465b53342d88b4280b1889f1888b411 (commit)
       via  7ae9a73236a4bb7eed9f02b30563b7f4e040863f (commit)
       via  35f2bd564e1e0311e3440f09bf81aac822d65a1c (commit)
       via  f5bb60e5636d908de8534d35b5f06142ae2a8c3a (commit)
       via  b8d12c83236964f6bbb5cd3910b0960abd0117c1 (commit)
       via  c260476dc19056181931668db6316055526f4daf (commit)
       via  60765d3c47eedd4bf412b53c2ce47c5de41be8a8 (commit)
       via  b26befde4983f01b37f7e29bc8ebb8dbc7f6c1de (commit)
       via  d178a34c2798221f7cee90d07bfced84df4908d6 (commit)
       via  da9206de5ccdb4ff074c0246856ac8de487eff40 (commit)
       via  6aa910d6307f825013e2e0d7b5b1e4599a634f1b (commit)
       via  9bbc2ac61f19fe7d27f3268fb4de7dd727a59bb0 (commit)
       via  ff23bfe6d68eeb0972e9b01a45b950e6ae781b01 (commit)
       via  0fd60764e65b270cafc1b3b573e5ac14b3c633d6 (commit)
       via  a3c0c4cffe519c5d70185ec989fe707102a71836 (commit)
       via  d119c3155e617e120c69abebcf8d22e500dcffca (commit)
       via  c80a7b8a53dc04e66b55431e2d4c637618293dae (commit)
       via  31830397285a50d1636622b58d04fffc7ca883ae (commit)
       via  c96f735cd5bbbd8be3c32e7d40f264ebfa889be5 (commit)
       via  5275b7b65e2ce2acf92528868c9859dd4407c4c1 (commit)
       via  973fc74ac4c030a350f436e96d37a582565e41dc (commit)
       via  95cbc4efbaab12b66852ede318cb9af0d3f8780b (commit)
       via  44f582bd7d16b11259cda469de5f56bad9768059 (commit)
       via  48b88725698f7e4979f577c0e86689160aa758c5 (commit)
       via  90ab0a155bc5e42ef2ad35569968dd3db9c501bb (commit)
       via  a46eed49d48d22fcf83eeed363b559e4b60bd840 (commit)
       via  1fe148279b130dc4c8c072ab3bd1006cdacfc9f6 (commit)
       via  137d1b29b6063f4d1983bde07f6ec5404f67dcee (commit)
       via  1afd287718c25b673beb31903eb80bfc9f268d87 (commit)
       via  a4766a155b7821c9b7eb5e126988007a95118dc9 (commit)
       via  8f3c0649785d7fb0df37a9ba9e0e20c978044bb7 (commit)
       via  be3038ae1b595d1b9942f9aa72fa3d96aed3b22d (commit)
       via  2a2aa4ccfb548b2a18b10e97acd80df324c5d4a8 (commit)
       via  02acd96cff43650110f4af6d2fb2a8143887ac00 (commit)
       via  a6790c80bfcefde81e032db9d3a45c7a9e48faad (commit)
       via  2342cdf1ff5563c6afa1901104fe4cda849ad345 (commit)
       via  5b302edc6302331a2c39ae1ac5a18759e47340c0 (commit)
       via  85071d50cf5e1a569b447ba00e118db04293475a (commit)
       via  70f720080190f2ec3536bd5c15c7ada18a7a7fa7 (commit)
       via  3898b36a132fe44e51cc99674104d9e1f0d35d36 (commit)
       via  ed7eecab42af0064d261d9c9dafd701250bbc1d3 (commit)
       via  2adf4a90ad79754d52126e7988769580d20501c3 (commit)
       via  d6616e7ef66b3904e2d585e7b4946900f67d3b70 (commit)
       via  c4344fadc93b62af473a8e05fc3a453256e4ce13 (commit)
       via  a9b140ed88b9a25f47e5649b635c8a19e81bfdee (commit)
       via  f5d7359a945241edf986b7c91c0ad6c7bcf113e3 (commit)
       via  f1a85d0c97636ce15d830ef56c3440298f3773a7 (commit)
       via  e81b86767a740bcb1c4d1a0408ad9a70690df0a6 (commit)
       via  5222b98f4e2021eb543f836d5e6876eb28eab716 (commit)
       via  0d1e50106720fd7c4ec58e88e381ce7cff071648 (commit)
       via  8d139f70ee129787af631531e4ea825293007a58 (commit)
       via  6b206d435a3dd92ef4a18f1c4558da147016fe4f (commit)
       via  cf136247fad510f55ba230f746558274fada1de6 (commit)
       via  5f5b2d7a98eff5dc74f74b7018f50e286ae82c2d (commit)
       via  7209be736accd15885ad7eafc23b36eec18c2213 (commit)
       via  703cd3ed5855e673443e898d427fdc7768c5bceb (commit)
       via  1a035954731fce34faf09705bc61b7eb0ad34ac6 (commit)
       via  ae43bdedcfaabacbc8e4455313e6a5b4d98a68cd (commit)
       via  017b4e1bcc7fe62f11650444518f422934c925ff (commit)
       via  e9e29a281b0b8b9d91fe9097e51c7e5df6d3ff78 (commit)
       via  fcd39b6e84665a033d7ee4c06bd904e2b416c53a (commit)
       via  ce00497088209db82fbbabb80381acf92039763c (commit)
       via  0fbdaf01b0fc3d7031b51d542b91f6f758f033fa (commit)
       via  b3a1ea108d3df58dcd2d247fdc87b3d1fbd953cf (commit)
       via  2de8b71f8c0e7d02e25aa7ec6fa13f9933c8b534 (commit)
       via  4edd9c38112db5161f46533ffb3886c85880ee03 (commit)
       via  bff7aa9429b7e0a9f26f69dd24c8aa7efc64ffc6 (commit)
       via  1e0d70a994d9cf9cabe10d1205c40b74af2a2bc4 (commit)
       via  738afedababcfc874fe107d9bc408d69d213813e (commit)
       via  8ed59723a5ae90dedcbf741254b65f88a4c98ca1 (commit)
       via  3f2d29d0dc92606fac3ba306c34a32a0bec8159e (commit)
       via  3bfaa404624697f5e2f08076c78f94a8438e851c (commit)
       via  f85f868171956abcc1996235a26a276da2ca6209 (commit)
       via  1982c235382043d87737ec24779d10da216101a6 (commit)
       via  f6c675c19790d3715445a7877cc8d1d193f17071 (commit)
       via  419fe34aec67a7bf30991f7df7b568133d8eb541 (commit)
       via  96d5830820c86c06517fc3f4c0f1c95c478237db (commit)
       via  5ab87bf933008827bc39aed0cd4e6bf4cd71a062 (commit)
       via  7225bbf8e6e3c892159124e7795f7396b5764bb8 (commit)
       via  2056251f56e4c5e3ff785b924061fecfe1ac21e4 (commit)
       via  a5eeb73116cbc74f6bb3fb4a06b99396a8ceebcb (commit)
       via  743dad9408b0a86052156e6a3d4fec1001600017 (commit)
       via  af927e2c390b49012b276c11991a3f7ef3a592a9 (commit)
       via  d267c0511a07c41cd92e3b0b9ee9bf693743a7cf (commit)
       via  42968abbd4edf489d4d667089033d11e4045f463 (commit)
       via  33c0d21361655c08b274c75736b7bcbe99dd3d2d (commit)
       via  e114429f15c0ff8b5eb77728985281afcfc0d37a (commit)
       via  6dbe35be17827ccf8bfc904be707aea01fb4ef94 (commit)
       via  a8a8ceb589f9f3bf4da29717eec446cb2766032c (commit)
       via  1f6c32ac6941c3c2ec456017e73ea74ca5944e1c (commit)
       via  71488ea628a1d791eeba41cb2eed3025c6311565 (commit)
       via  956e210239d46bebe4574c5ca38b3b51b1bb7c65 (commit)
       via  fe76209cd8ad96144f0e2fc9522f5fda1d52d9c3 (commit)
       via  cadfcca91ef5bdb2c72c9db4e918ff6ac7b10e65 (commit)
       via  eb4917aea94d78ea64fa90f0c70501bbb6d48b37 (commit)
       via  a01b47d66272166135c20bf15a958bed023ff009 (commit)
       via  461acc1e4b464611411ae77b7a72d65c744a740e (commit)
       via  9163b3833877225c8b9bd8e59eb7159ea65d3867 (commit)
       via  e451eade196bc7cc43102412a73faa397253c841 (commit)
       via  5a2d0b61afe86668613cbb83a75708b760aae76f (commit)
       via  43bbfab2cc57a08da6d2d6ffe8da92efcae9c2ec (commit)
       via  38e530b762f7d05caaf06aec41c6df432f0800cf (commit)
       via  e8e1bd309d449b23dae2b472b650a130300aa760 (commit)
       via  a5adb8e45ee8c66a19c46bd1bf5f752630619be8 (commit)
       via  85ac49c5282c231c71b8d2046889d22b0061db08 (commit)
       via  ebeb5ead60c5c0d7b16478498b78a8f1ef3b71c3 (commit)
       via  e38010819247006d20532d24de8dd6c37e0ca664 (commit)
       via  00f4c38428153bb5ad99ba1cc40e9a204266dace (commit)
       via  f7bb760f4d8290d52959ea83b090d1877e4ac9ee (commit)
       via  b29c5e5221b8e6a9ff65a0c39f14c04afaed5c44 (commit)
       via  9e17bd49b426ffba00312cf90ec80d178a20b964 (commit)
       via  519720d9c6eb354a2e31089f1c7b8fd0760053f9 (commit)
       via  c6babcd3e44bc42fdb090d3a4837848d8c7c149c (commit)
       via  bb444bae93e8e87d1e62214b1819fb73fd7634e4 (commit)
       via  8fe024cd171ecf1610419abb70e5d613b94ba5a0 (commit)
       via  d36eda71276b43e4281ae53fd558155725f4d4eb (commit)
       via  32f075fa288dc5ea049cbf72657386889144bd12 (commit)
       via  471edfd7a86d91f04536bc7c7fb42ad7239e1731 (commit)
       via  feeddd7e5b966c9445fc4ac97a6526fa792413cd (commit)
       via  d801b1e2ebb6c9cf35e3475040b013784f3e6e41 (commit)
       via  e4c78a2739dddade3aaaa12528afff944458f777 (commit)
       via  48bec4c73b92679e91f0cc72fc63bdba9c593e87 (commit)
       via  dc5aa6284fe6b6f51d85270969f0befd8db1f838 (commit)
       via  15e60f1f54722c32c9977f00e49c211f047ee08f (commit)
       via  85e4dfa61bf440c132f4ce6bc73130bc6e91719c (commit)
       via  7ab4a102ee610f36b4362897431e4fbbeac735c5 (commit)
       via  054699635affd9c9ecbe7a108d880829f3ba229e (commit)
       via  d04acfb82c3425a638f09d2f49208ef86bc7a6b3 (commit)
       via  434f4fd17dd3dee1d17e7b2e008f1ab1416d5799 (commit)
       via  ce8b5fe9567f06f7acba34b9e9b35ad471e2ab67 (commit)
       via  34ead9dfeff5f64af36a209cae28075fcbbb3330 (commit)
       via  fcfe5af9c22c5b666e5ecf646bbe0d9da7b655e9 (commit)
       via  1f967a8ffe37f6732dd628d28a13abc442541c38 (commit)
       via  3efca5f9b7b7bfeac53044fdd44e5add61397157 (commit)
       via  a35b62699480e149f22f4e039935bfcf41f97ac2 (commit)
       via  9dedc72e89b9ca8ba2c5f3bc562ad9ccd1aa05b0 (commit)
       via  7808524aa9bbb424327ac67d7408647cb18840f5 (commit)
       via  5b866ef26bd5ae980bb86c494a592ef232552b68 (commit)
       via  a5387c15e93c6d1925bf4ad0eacdcfd63790c32a (commit)
       via  d56c782197242e32ccdd23c9e3652ff520f3d58f (commit)
       via  bd8cb42b61666342ee8bc6c33aed2a168301ff67 (commit)
       via  9accf90bb081b057023479f0a86e54017b02cdd3 (commit)
       via  9eafb04ee8dbd47022dd9a5e5c1310f88f398d2c (commit)
       via  7af1aeddc36a1ac1343f1af12aa29164f1028f03 (commit)
       via  15f5d7895a2744376062229cf19593016a773cde (commit)
       via  ddec42c7a23cca11903ece8f7ab614dcc7e5edd3 (commit)
       via  d8cac904c7aea4a652a47afb35aceb6ca4808ce8 (commit)
       via  433381e5ca62418fc90377d16f1805260b27b619 (commit)
       via  c8bbdd1d74ac313d8b57d8debe4f7b75490e5df2 (commit)
       via  e57c5196d3e8dd56b0190799c98b56a5be55333a (commit)
       via  06f7bc4b3b69e8fda96f6e626a7dac5b1fbbb233 (commit)
       via  0aa4c14ebd1eb0a68c2bcf5c617325596657ea71 (commit)
       via  9daa2f686b3bdb03b13e9becf45a722344888cf3 (commit)
       via  f159ac66aa577889514dc170c87a92c49be5a6cc (commit)
       via  d6b86a88c7a486f2e5b742fc60d374e48382320e (commit)
       via  5ddc441f77a34158039f0328c3ab7c2106b7b3b8 (commit)
       via  290e89c515e051dad269f1acbce0b52a541d9c8c (commit)
       via  9b8925a4d0ecbd8a09d307dfd56fa15fb8eedcc6 (commit)
       via  53314ecb63f3f0f85629b66a228207658d8fd73f (commit)
       via  863509e2dc3bf96fd38476d787abb62e0da46624 (commit)
       via  fe1d6665faf06b3fcc0aaf8ec72905aa4b7ce1f7 (commit)
       via  7581a21a7dce1dc6b92ad24293b4269a3531e6d4 (commit)
       via  1fd37ae8a4bb25a6e85ffb2158b2ae95fe8cbd04 (commit)
       via  8ed3b760c179df435882f2ad96b6dcfad5b6e9fa (commit)
       via  3516ab551851273faeeb0b8696695e5f3ffc88f9 (commit)
       via  9f8ddd6ee1b73c9403f85b6ef5c85605ca393aa7 (commit)
       via  898485cd30084d478e8be688151cd11fb4d492a7 (commit)
       via  30f4856101bf23ce155ef0f2ebd1ca6f034d2420 (commit)
       via  eb4be17ddf3b26c379e3f100cf8e8b0fd4329537 (commit)
       via  ac06a06d1df9a1cc905b224b79921b0d0ade4c05 (commit)
       via  611d0300fb8bb2e87d787023cb5c6030ee07d8d2 (commit)
       via  fdf02d580f2bb1fbc6fa85ee0edd81a07404d1de (commit)
       via  a0bb482b46bd05f8c8774bacdd26dc891cb3bef7 (commit)
       via  cebd7e3562312ade50d972af49239cee7f10d057 (commit)
       via  8750dc3ab772e29d7374d779cefb3c8b8c61d2d1 (commit)
       via  b743e6ba98c8cbb53c45e1c0f59e5a78ba62f5d4 (commit)
       via  6556a2ffdd7bdb5370c2f1b3d8c9e8799ef82140 (commit)
       via  3e9189a483c0f53eba4f05092c90f7955123f52c (commit)
       via  7f5702a379516cee041129c03dd37d67f26d49c1 (commit)
       via  e60ecc91ad65087c3cff3af479cc455abccbe020 (commit)
       via  62bd7736311e166aea3604b8e486b58c1315f82f (commit)
       via  9687077033661cf07b6ea2e966299e837a501612 (commit)
       via  703d5f36d0102993f311d21e662a28492d8cf7b4 (commit)
       via  84d9095c66c765cf78814323597b2e3bbef293d5 (commit)
       via  e54bc83c4e8a66fd9ab1ae9f27899d70ef82a066 (commit)
       via  1a8c86ea2503bffe6dc1f2300dfc2b4efba108cc (commit)
       via  ed5311a26b7b1368f28191c405ec13da907213ae (commit)
       via  493a6449b37b34ac5fe36257b266c229e34d105c (commit)
       via  6f6a4cf9d98f2b4550e0949da1e20a7f38440610 (commit)
       via  36a53f41a7da580926111dca65652d6389fcd909 (commit)
       via  61681dac2023240a4a029072add3a39809ccb7f0 (commit)
       via  96dd4d2daf1fb91672a798fa478da0ec8a7ac737 (commit)
       via  9354737244e0bb7c22ec684ed652c89991eca913 (commit)
       via  9bbc77b6b8381c9a6d831e490a7715ba84b9356f (commit)
       via  8023760a5fc6f346cf82340aa50df755b0d0d00a (commit)
       via  cc0d6e4674fd2e6ebe3775a28ec87fc5c869f924 (commit)
       via  f9cb0d187f02078b27a0119ce42c83f62461a507 (commit)
       via  4fda2b6eefe81f1c197d32a0c8eb14ca1a7d9108 (commit)
       via  106e9a793499c81698cf5a938d48933f5e909af4 (commit)
       via  26691e282b76d74959e63524b280e77b09ac89df (commit)
       via  4cde36d2b97a24f03c192a61248545d0180fb856 (commit)
       via  c874cb056e2a5e656165f3c160e1b34ccfe8b302 (commit)
       via  12fd115d2e1ea8b55f43313ac665c32e07f9498e (commit)
       via  84ada921a2fe98489b578b6d780c1ad2e6c31482 (commit)
       via  763a994cb14bb11ba823831f54d64071319bfac0 (commit)
       via  b86d51b24e7d1bb4980426c9a74962628c096ba7 (commit)
       via  48d5ac59277e2e8b43f697a0d1d4b0991a40caa0 (commit)
       via  c191f23dfc2b0179ec0a010a1ff00fa3ae1d9398 (commit)
       via  8d2c46f19c1b4f435d7b9180ff6c2e8daf78ab2b (commit)
       via  80319933903fbdb359ef9472573bfaceda7c8cd5 (commit)
       via  8c838cf57adef3c004b910b086513d9620147692 (commit)
       via  1378551aa74712c929a79964ae18d9962ce73787 (commit)
       via  bb7833f2054edca11a32d24d17486f153db00ec1 (commit)
       via  c430e464860b4460a0ab32454e53918a1cc7444b (commit)
       via  39e529c506a4350cd676bf5ddff6d61686e8814f (commit)
       via  aba10a01b765b472d57112fd4e09a6fb47b49fa7 (commit)
       via  9688dee697e9ad279c6542bf164b820e907e526f (commit)
       via  c1a72c46b572eee2d94ab53a5589c724fcb1fcf1 (commit)
       via  9016513b4d19d2781d0b6f2575b490431e04ec79 (commit)
       via  13e8bc43e4888fe9e6df7e536ea0b439c6351199 (commit)
       via  e89895b7e5f3b7074271c89de281e426c53be347 (commit)
       via  938f4e9ba14954551fbc390abb7d1e06d38189c2 (commit)
       via  b0b0da67c915f3c02020397b8dcf6a078a9b3a90 (commit)
       via  1ee8ad4a2b092a6edc35c111c5a3b5b761da0dae (commit)
       via  c943619d223be1158ae8db5223f655343d06785f (commit)
       via  0d874a95d3c782b9c663c64be619f449956df457 (commit)
       via  2d325650009f46a1f16ef2e7c1f4ed0827db236f (commit)
       via  abe73e885b980aace1fd0df492fa321bdd35f01f (commit)
       via  53d45f54e33d23a5b4df42dc977a3a6ab597f5c5 (commit)
       via  338b54ef4631f0d35601f174eabfa10f1541f46d (commit)
       via  698176eccd5d55759fe9448b2c249717c932ac31 (commit)
       via  41cbf5a91bdfa0b311aade6b05d2f51f59cce978 (commit)
       via  d845ae918fe8dce6806c3f927a7c101fc0e2173d (commit)
       via  7bc93774a449b3f19748a37186db3efcd3d6c537 (commit)
       via  d5a58bbe641d32257035a6087f18655e7b66d8fd (commit)
       via  c64c4730852f74fff8ea75730e0b40cd3b23a85e (commit)
       via  fdf1c88a53f5970aa4e6d55da42303ce7d4730f7 (commit)
       via  33ee923f7139cbda7a616a83d572a4358f456e16 (commit)
       via  c69a1675dd0434db0b99682d14fa7905fcd3af8f (commit)
       via  9b23d60d6f58b18da3995dc3e090d7fd63233bcc (commit)
       via  4bb4081381b39c563707c03818a0f9d16ef7846f (commit)
       via  eef5b0eb5defdd22ef5e351213ab66531f788c5d (commit)
       via  e7f1ead205f2dc13d6fd6e2a28b121794ca281be (commit)
       via  638674c480d47cf957a8b4f7d61dda3320c881ff (commit)
       via  0a22b98c05bf5032c190fbfdf9fefceac3597411 (commit)
       via  f59415a8b5ee951dd298eaf8eecaa21e8955851c (commit)
       via  4e458fc15b5c236e1cc44565f6af313753e87a26 (commit)
       via  e2eca96f1876a72fc8c121c9204d49cb7e9eaeb7 (commit)
       via  4a605525cda67bea8c43ca8b3eae6e6749797450 (commit)
       via  85455b6e2f7063b10bae9938de1b70f5d319911e (commit)
       via  66e1420d30f8e71e867a3b5b0a73ead1156d5660 (commit)
       via  16cc75f764b6ea509f386c261b472e282cd606ed (commit)
       via  b2d2acebebc66495b98eef634ce633eb70cc2411 (commit)
       via  b1f197c6102ae31ded2e4b61103308dcdfa72a89 (commit)
       via  acb299784ddbf280aac6ee5a78977c9acbf1fd32 (commit)
       via  2418922a1389bbf265b02328f7c4f594257c4026 (commit)
       via  44a44c0b568dc997e7522292212e0ef02b522f3d (commit)
       via  250ce2abb3d6b48fce778b5e0c651d57582aff7c (commit)
       via  99be45a44f97942f9327b16aff368f1650994e0e (commit)
       via  7592596f7a9f8dce2e5e8d9311cc40c5199c66e3 (commit)
       via  c24c42a5e29444313efee6528f172ad66452050d (commit)
       via  5e14c4caafaa44b92134c5df01b726f435f46845 (commit)
       via  05eaa177051b212669c2a7b9e2194c3e9ba47f14 (commit)
       via  9797d47ab90761c50020f78d5a55fb2672ffd7c0 (commit)
       via  000164d51a974acf3846a6b0a7795f484e915161 (commit)
       via  0b46c391a973bb8d3f0a1681eb0a79e8a196f0f0 (commit)
       via  5e5743ecb40da81c4e8ad27ac8b158c9a7aaff87 (commit)
       via  9c95bf79406ae791e2f8c7263ff4fddb19d0eda4 (commit)
       via  7dfa14ccdb6777ccacb99fe0d716b7d63654426f (commit)
       via  f0ff0a2f69bcfae3e2a30a3bdeae37b475ae9106 (commit)
       via  38816f95cc01f1c7aeec1d42bde3febb308dd98f (commit)
       via  0f8868d1ed7d479d05e2a70de67897d133d41ef9 (commit)
       via  26841bf1f0c0f0066e17b53bea2261e759bfbdbe (commit)
       via  6b4582111d6f9e8a09e305ec3da009d8d393603b (commit)
       via  1b5cb4d4168c3fcc2d22bcfdf5260ffc36d0a42e (commit)
       via  f500fc46e6467263b38c50010170f83c10d22e8a (commit)
       via  114e59f9ed93ba3b6e656785df5d527011f8ce2b (commit)
       via  bc03b37015ab6ea23cbec70dbd299c74fb001aba (commit)
       via  e56e0f7d1ad206f1ebc26e285d82a8e7ff6390e1 (commit)
       via  7d2b0148161460b928cf39c7c2969d95d2870d9c (commit)
       via  58b843554162e6599ba895c8325985f74adef734 (commit)
       via  98cb905a5852321204499985efb42c5a76b9da6e (commit)
       via  f7a92e4b0336f3c64eb429947657952178b7d76f (commit)
       via  3ff9c6c215faa2e1419d4cb67906a1f7772b355a (commit)
       via  90b3952ff515f8746ffc6b227695836921bc046d (commit)
       via  0372723794501908ae94be9330dcd8577d951f68 (commit)
       via  6b27a7ba1c0343725e3d2e9ea7d97426a8f73f0d (commit)
       via  a8b5aabeb7b56702a85344434d7822a034ff140c (commit)
       via  87a3c86e7e132a1ee80bf29b418ad4b61cefc7d8 (commit)
       via  8b4f53f245ab45bf07be9b1108fca951133b836a (commit)
       via  07b6398dbd11037eb553fc6fcf56dc8051e71150 (commit)
       via  f0ef6c88066961a038ea1b80face4feaa9a2d17d (commit)
       via  8f9f4ece764df4607f695f3f7eb4c421e8ac4c9d (commit)
       via  7751d0ac43f1b7186a53ba5dd5cf2eeca6f7dc46 (commit)
       via  40cd22fc64c7755efe60cd42cb12851cf3de55a4 (commit)
       via  ed8d686171f140fd12164d2d34f65b4ab3c97645 (commit)
       via  1e32824c93dac7e406d1b35449b42700bf854679 (commit)
       via  c5d5522f83888a8b442aa7ff17738f3f688749fe (commit)
       via  688867daa34ade5075443c77535f80e1d2d76743 (commit)
       via  eaa56b3d005a20f945cd333664cf34633cfe5a7e (commit)
       via  236b6ec7a803f9024141e0dacc3dcf75583fea8d (commit)
       via  81bb03bbb092bace3bd8a44a6ca2862154503092 (commit)
       via  d36ded7d95a695f0412f6ccdb59bf55fc600e9d3 (commit)
       via  b8e90124c19177e0b6b33bd624e244860e2424b3 (commit)
       via  5cf1b7ab58c42675c1396fbbd5b1aaf037eb8d19 (commit)
       via  17d9827aa40e363650d1698fddba9204f27b5171 (commit)
       via  27f447c8b054b17d96abfba431568c1ffe017f0a (commit)
       via  219818389cc848dc2d67aff732b9790968851b51 (commit)
       via  e602f86dae29c62619b0ea8bf2ca69e1ce1b8295 (commit)
       via  57f7044d690d38cff90487b5883883a674d2589f (commit)
       via  383b6b2891226228ddf3cfd4c3dd8b17ea186b8a (commit)
       via  8cc8f4c008f640b7f13f8f1160261275ec14475b (commit)
       via  b6dd72042939ca62d9ceeb80385eedc7c5f0560d (commit)
       via  31e010330189f489c624b7cdb812ef3f33f8e280 (commit)
       via  70bba1b3f811261fcef30694568245e83cd64bc5 (commit)
       via  6c5f8867a45f40411594372bca09c04ddf5c0002 (commit)
       via  f1fef139dbc592aa4c7071d47e38e14487ab72e7 (commit)
       via  2c8b76ed408547789f2e26ad76773e40e316a392 (commit)
       via  eefa62a767ec09c20d679876842e15e9d3742499 (commit)
       via  58845974d57ee0cd0b261b00d1ededccc7bde105 (commit)
       via  d49e3c5e79e00b59e518c4bc1f71882adf721696 (commit)
       via  b84d1a0e0f13064b8dd68222c063565ac4deec3f (commit)
       via  3a6f9f395c141058fb732735beabe7dae1f84bb5 (commit)
       via  06a24c688282b61dd2ce5b6c00608bee34ae3563 (commit)
       via  b902e70583a9dfb1ee410e297e2da4c8b944ba8d (commit)
       via  09349cf206ee9e68618713b97e621b7ef2a6c0a9 (commit)
       via  ff1bd2a00278bc753a7d035fd5020ff936df1882 (commit)
       via  c89f3a2f43fd7fe70bcb199fad0ccf94364b1ebe (commit)
       via  4c86025464db4603ec07490169aaf4b77868057b (commit)
       via  842fc917163f0b8cb2a703a4c7fe078d944932e8 (commit)
       via  0eb576518f81c3758c7dbaa2522bd8302b1836b3 (commit)
       via  68cf1ccf20ecfcc1e06de69fcd50d13cf8b5e1e0 (commit)
       via  bd0c874dda60a0f5e235b653e1bb63716cb385f8 (commit)
       via  b6709a7001e4812c4ed774ef0ff3111fb654d199 (commit)
       via  9b4326dc093b71bcd77a527111ea6778795bf068 (commit)
       via  2c5b2fc19c21dd12747eb960baee65759847a118 (commit)
       via  0aa89cf84c78a9ee8b97a51c17b3982324021f81 (commit)
       via  d9dd4c5a7438c152f6c9ae2bcc4c9f5ee598728b (commit)
       via  03da93322b956e003882c09a8d4ea949f790dbc4 (commit)
       via  bfa93c0ee79935bf37d379065e219ba0afb0c4e3 (commit)
       via  7a061c2e82d62e2b275cb5a8d7460dce7d36f050 (commit)
       via  a6cbb14cc9c986d109983087313225829f1c91fe (commit)
       via  7cc32b7915532354ed7e2fd15f7ca5a9b9b64610 (commit)
       via  dd340b32df88083fdc17f682094b451f7dcdf6d6 (commit)
       via  30c277567f64d09c11cadcb173eef066efdaea07 (commit)
       via  ec2793914d1090db8c8d94a2f9b92ed97b1a6cba (commit)
       via  a59c7f28a458842b4edce2d6639639b17a85eb9f (commit)
       via  766db4a6100e34e6a29aa9c849b60ba80b551389 (commit)
       via  f7b5370a9bf82b0b480b75275349d8570ee83c4c (commit)
       via  12d62d54d33fbb1572a1aa3089b0d547d02924aa (commit)
       via  c38112d8b59bfb6e73b5fbc637fa9eaaae42c52d (commit)
       via  ccb4c0aa696918c579a0b80448fc93606152ec93 (commit)
       via  0fa8006ade38ac7206ff57934f3bb866be6407a2 (commit)
       via  b25df34f6a7582baff54dab59c4e033f6db4e42c (commit)
       via  715fee7daf2f966261d997e1b39888f14fb28a45 (commit)
       via  c3424869801ea8811106f8f97928ed5cd71efbff (commit)
       via  4e544fba3459913e23f86dc5e628665bd288c483 (commit)
       via  259955ba65c102bd36ec818ca4193aab311e983d (commit)
       via  1f81b4916fa3bd0cbf4f41cc7ad8f13450aa6481 (commit)
       via  6d6353cea42ed088df3c2c90c4c2741a1b8b2871 (commit)
       via  7efa61c40b94d3234dd7fc79a0fc7ae0f1b0a105 (commit)
       via  5c3a7ca7b3b28a7a163b0af3cbadc3d8fe7a702b (commit)
       via  54c6127e005c8e3dd82cd97d49aca23f5a5d8029 (commit)
       via  b6261f09b53af42a26d88fd50d74ab1e84524cce (commit)
       via  8634aa9cab1c2205629540b4d99b88847148bd80 (commit)
       via  d1a1871cc6c93ababba62f42bcab5205320b8867 (commit)
       via  2a5c5383e3df0e625367bf85b740f62bf777b211 (commit)
       via  af10f1ef696ee94f817bc389e0e8b6cd08234333 (commit)
       via  f16de89251e4607eb413df666a64022c50478a4c (commit)
       via  3eb0dedb8a5d9835b394484c6112a4b2fcbe9d51 (commit)
       via  2f8c4b3da6060a9b57e944726dd61cb1b2a19906 (commit)
       via  4e93ba217318854742144bf0b8e30f4c3614db92 (commit)
       via  ee468e8f02f1cd1bcf09da75170ed62dc230b70e (commit)
       via  433f29fd44d8dd6c940e49ee2657b769d70781fe (commit)
       via  f0274b7451761b2dc48c0be148ecd8563c9800da (commit)
       via  45ef63790b34ebc2d26081609bb168aefee800dc (commit)
       via  38d80ef7186ac2b18ed234a825894f5f78fc90b1 (commit)
       via  88bee2515653d3b5481608bc92a1956c7ea7cf48 (commit)
       via  e9286ce511be095f2b16b1b7bc503b1e4377593d (commit)
       via  723a6d1f333f1d513d5e4fe26a8ee7611767c9fc (commit)
       via  88fe1bafce118f40d256097c2bfbdf9e53553784 (commit)
       via  cbf08d56345922d754182b941b84b18bfddabcda (commit)
       via  84a95705e1e8219187e75433baec2fd2fc8ba2fe (commit)
       via  aa5fd84d438cf165c9836fa545d15c33781401af (commit)
       via  fac67afceead36ba7296e194942811d9ed3b437b (commit)
       via  90b740caf4cc5d207dfa2ac98f1c73d9818792e2 (commit)
       via  0ea828cb5c74b0f9a254aeab2c7d31ff214371e5 (commit)
       via  170a0661dfb17014a62cd2eeaaa99e408bc55a14 (commit)
       via  b12f4e55007ee2e8130991f322e782bb31a8a289 (commit)
       via  18083458382473b414a3fc7f57623d2241f487ef (commit)
       via  fbe4ee1f76237fdd586638ce1ded4c6e5bd0bf1d (commit)
       via  9c53309978b4a4bf684b3abbb853876c5413f875 (commit)
       via  8ee5844e8dc3ec7d99a5890bdc85f54afd8886b6 (commit)
       via  c9ad781ebbaebb2e57956ac9eda542eaa88a743b (commit)
       via  9f441d72a245e3ccce2ee014adaa0ad62e7b0d29 (commit)
       via  51c4b53945599a72d550d7380c7107e11b467d5c (commit)
       via  efe8aa23b59448214ef826a5910e52bdf0ce0015 (commit)
       via  4d39f72b87677c194d282a9e93de67dc0adfb4f3 (commit)
       via  ece8bd155e646869b10fd08817ee7cd71c699c61 (commit)
       via  b59f898456b33294d71a333d3f3b4fe9dc81e3dd (commit)
       via  84d7ae48d44e055cb16e3900cf2c4b2262f6a6da (commit)
       via  f8b10842465d60483e3bc9827e06115ea8081bfc (commit)
       via  a4ff990c9b0136c97b101f42dd5498a453fbdf25 (commit)
       via  06341cb6cdbd5ff57c376f7b0b25aba4a35bab86 (commit)
       via  54aad8af04350eb3a45a4bd6623681efa2f8d2fb (commit)
       via  61aaae27e12db2a00cfde674931e5080e733e6b3 (commit)
       via  3089b6fd6eff650dc06c0698b80eae1595986677 (commit)
       via  3a9dc4fbd7dab867829ba3299d86c2f5b58d864f (commit)
       via  5859f177250685fbd49c9562ffc3e984b9d5ebae (commit)
       via  4948e0c8965c3d39b6e1bcb1bdb12b9615260a27 (commit)
       via  59e2ceaf7b75c38391c518436a70ac3d41b8c8be (commit)
       via  4e3c6c5e5b19be3a0f970a06e3e135d1b2fae668 (commit)
       via  749e1c9c0627c0a20dc824ecc8c475ecee613d8a (commit)
       via  8d8d6bd981771edb3011afedc5e62a59d78d7826 (commit)
       via  03e9f45f8a6584a373f1bd15f01f56d9296c842a (commit)
       via  cb4d8443645a5c3e973b4e2477198686d8d8c507 (commit)
       via  f847a5e079ceae0346b84fb320ed06ce9b443a63 (commit)
       via  05512e090c6c3cb852cebdb85ae7c12e8001603b (commit)
       via  c35f6b15bb6b703154e05399266dd2051ef9cfa9 (commit)
       via  3f2864bf1271ca525858cf3e1fa641e3496eec59 (commit)
       via  f8720ba467d8e107c512160a5502caf9be58a425 (commit)
       via  38af8a4225e8c82564758e8a5629da438220bc87 (commit)
       via  c5e0db2b7d8fbdb13548e01310f623f131ea0e9c (commit)
       via  26c7bfe851f00422beb442a77d25cc0887557b79 (commit)
       via  f5239632a06383f2b4f6825cb6a006ceb8bea417 (commit)
       via  680f05c35753bf1f70392d25b1e6310cf46476ce (commit)
       via  b12351c21ee92a13536aa89331cc73bd166dbe5f (commit)
       via  2e1dceedf6a4f661a8d7e57757b28f9f6cb1a9b3 (commit)
       via  df69ad0d0231218610f68ecb2b1953ae7f28fa68 (commit)
       via  5b713ea8e5fd35fdb1ab7ff953e010ef9b60f98c (commit)
       via  02b2e71bdc1564f4272869bb5676727af809870f (commit)
       via  8d1942a3b7516e8161b7f54888da2a4a4d27484e (commit)
       via  856ff83ad2b97c136de1103a421547bdcb332e74 (commit)
       via  7cc9b08f18967fa1a694f5b7e320aad62d0d3e88 (commit)
       via  25e56e5d1bc9197e882e3a42285d0efad21a51f2 (commit)
       via  87d2a8766e610a0dece7d86268ac9be4122d6d82 (commit)
       via  64ac0166d5ea3b565f500f8a770dfa4d7d9f6a28 (commit)
       via  c86612cd4120b9ad3d00978c04ea252e7d501e44 (commit)
       via  c1c2ddf5be4556e6e8cd52a314ddd6d026c7e540 (commit)
       via  ba50f189eced101999efb96672179aa1024204e9 (commit)
       via  6906362bebdbe7e0de66f2c8d10a00bd34911121 (commit)
       via  83a58b817e5c0432d543b66208f502b059fdbe13 (commit)
       via  40126733cc69634035b0cca3a0c90ee3a606ea3b (commit)
       via  bcafb8b98d5df77108a83a6bd8b7746f7c2616d7 (commit)
       via  4ef59f25a452f934408a9ba837cea9b7fab0be48 (commit)
       via  3d069e2745070bc23f14c845cb7d8116d919f0da (commit)
       via  230df584722d08705f2cb3b99940b764b1cb7865 (commit)
       via  fda403b09887a24403c3a90d7ad6c95288f2d641 (commit)
       via  88095bed9cbc3e39c61eb0ea7dee1646ff13ac7e (commit)
       via  b557ab47f3355f5fc7d4f87dfa9e4a15e7e9f3e3 (commit)
       via  04b04226b726b6e1fea6bba970556b9ed5cc3446 (commit)
       via  3a838eb454ed0de4f073b99e94e02014eca63a56 (commit)
       via  748c3e1aeb833012a19b651af7d98757a8ffc50f (commit)
       via  a0e04c0ad837b4b42caf139573f2a95c86cdac76 (commit)
       via  4e12574323ca3db3e985acee0540c603b2b33124 (commit)
       via  3fc53ba91b92ad40ebbf46272f57a45e3d2e3a27 (commit)
       via  fcb2409598d37e2078076cf43794ef6c445ac22f (commit)
       via  c6d2a365580709981852007cd0a9a3b32afaa5c3 (commit)
       via  da8bfe82aa18a67b1a99fa459f48cea89ee2a41a (commit)
       via  7980a6c8e598d34f5f733f5c6c3ca83c0a0f1187 (commit)
       via  9c62a36b0ebf9ff4ef3dad1f4d91195d301348ed (commit)
       via  2ec9338d84714ea670ee888f1edf5a4ad220ea9a (commit)
       via  1d907966f7f0fe7089efe46d8b808d9115f0d167 (commit)
       via  93327a85ea63f7043c49a0af2384a1e274ab1dda (commit)
       via  75e756cdf9d5b08e859afac5cef38bd818a90e60 (commit)
       via  778bd1be6ced7f4a135e2a6bcc7414c4e4bdc27d (commit)
       via  38c8e9a9ccfd7fd57bc5fa5090c86cf7b7920d28 (commit)
       via  ddf9da5175b1182810838861f1464fb05fe00104 (commit)
       via  8fe581570c2ef4f881762f4f22ef4f66c1063491 (commit)
       via  2812fa5cb0c2013ef1696888651390aa71a76b4a (commit)
       via  b131dd71ce147b4efcece9dd8fba16c51fefa492 (commit)
       via  eefc291d240bc1fe15d131df9d463343b0333d3a (commit)
       via  84d83c1d8979e2906971af79f2e41083299beb7e (commit)
       via  255bf5b18e2b0e28a65062e87dc2d1212376bfc2 (commit)
       via  e2ada81cd2a090f707147abdb73a90d44db2f2b0 (commit)
       via  0953b3f5d7ed1b4a25362f9a2d1a41eeeda8efa6 (commit)
       via  8d380bb47dd24c7fd2c4880a4106835d871bf4d5 (commit)
       via  77ba8639c274865c762eee688383c321f18ef889 (commit)
       via  ecf3f4b962026aa9094ee321b03ee32df2fdf1d2 (commit)
       via  30df43575158b0cb294ec49a8463fe8b49593e62 (commit)
       via  4c0accf0a591b0422c84216150e1b9b4e008609e (commit)
       via  1f051716bce3d7aa2545722ba41958df9758cadc (commit)
       via  10553ed4ebb5b949ae74d277d398d2e8a3909ea5 (commit)
       via  d916aef6af6bb8506b1ff4756054a1697410982f (commit)
       via  4700bada6282f5ad10b53cd8ca7cc03b8fea791d (commit)
       via  ef64723fe9638f8d56f58fba44a149ac620eadd9 (commit)
       via  5de6f9658f745e05361242042afd518b444d7466 (commit)
       via  3f847f9d35bf2bf9ee0d957ea1aa9ffb27a32cdb (commit)
       via  df047c5ccb5c81f9a3d36f7fc38a19bc7c8f2ac2 (commit)
       via  a7346d50ae5389ce37e35a7131f0f218663b8c68 (commit)
       via  ad91831c938430b6d4a8fd7bfae517a0f1e327c1 (commit)
       via  43da3c6c1cc7cb5fcb1dbe2f983a53e883408d1b (commit)
       via  fb3d0e1146e9e5a36a9402690a09e7629408c677 (commit)
       via  27b3488b71a5c3b95652eab2720497d6d055346e (commit)
       via  087c6def9087019640a437b63c782a5c22de1feb (commit)
       via  3b0ccfb2f23961e4cbddb9d0873bab0f4c1d4c3d (commit)
       via  0a39659638fc68f60b95b102968d7d0ad75443ea (commit)
       via  2684301690d59a41cd20d131491e0714d156fa7c (commit)
       via  5baa7aa73ad8d8d5250990a9e330b9b746659452 (commit)
       via  7e857cbcbd5dfa64552d15dee5ed01ca39bf8937 (commit)
       via  1921e1297dfcb878b9417edefe4d87639c827948 (commit)
       via  fa4c8fa8acbff7f4defc768e50a453bc376c56de (commit)
       via  9b0785b11da612abf0e60f39950ebed9977b2e65 (commit)
       via  872bd5756ba8b5daeeacedfcd4ec38bc50035ec8 (commit)
       via  67d8e93028e014f644868fede3570abb28e5fb43 (commit)
       via  4ff5e524a7f79ad7f4513ebed3ca0990392263af (commit)
       via  5157876271e945703ee699f07442ee1a72bba362 (commit)
       via  73df015104eb5ac8934ff1176c24079e6e9b09c3 (commit)
       via  586d49827ebaa2cf2c70dc030c5830afb1fb89f5 (commit)
       via  2b755575c9d0277980008df99f92c38dd6b3a420 (commit)
       via  38d1a8aa943424e1a0de0503ee8aa961a95d0e14 (commit)
       via  4579a2a9f43a38144539447bb5076bfcbaf8b6d8 (commit)
       via  58d7fda0fd2efc2d4bccfdcb55ce6ba42af83aa0 (commit)
       via  8e5b40643255bd93c6edda9cabed39f46b074b0d (commit)
       via  7f08fc3123ef7d26a2e61dd29455c07510404a7e (commit)
       via  af6328603521584ff62b25a6f86a923bba5a4f5e (commit)
       via  9d48d1964569b49be17afc3e20085a23544a32de (commit)
       via  28988a78d3b80c7f1080fce696acf176b74a29fe (commit)
       via  5c6391cca55baec236b813b4c2e2b7699595559d (commit)
       via  08b5add9a6e405342c0c8bc3bdf5d552ed45df0e (commit)
       via  a176724d99c073f8e547dea2675a5b7d1df70515 (commit)
       via  a9b769b8bf12e2922e385c62ce337fb723731699 (commit)
       via  6318db7dc90cb6656cc2a1f8e875f2258f6a4343 (commit)
       via  35a0136d56de7faca280666ba40bb1b87a85fff6 (commit)
       via  3a11f2fd5bbe98fc555bfdf1cdf9019f7222e3e9 (commit)
       via  b97162729a3ad4214e5f6b85452a27904b8f34ca (commit)
       via  ad36c799ff07d47ebd5c861c63e9feef50408e34 (commit)
       via  9d3e78f0d8075ad62391ed005e1e82f79f05e2ca (commit)
       via  c5e0ebf85ef50e61457f3b99a05109a92b328573 (commit)
       via  8216d5dbe1ef23d56ba589fe1de619a601bada4b (commit)
       via  1c834de994f51a1fb98add648dad49abfea2c403 (commit)
       via  9622aed753d953a763a9c0ac25cd7868d257bad7 (commit)
       via  01d8d0f13289ecdf9996d6d5d26ac0d43e30549c (commit)
       via  7fe505d131d2a13a6a412789474d92493ade65dd (commit)
       via  353c08576b5c7fae46be834cb815df744ec2ba96 (commit)
       via  dc9318330acbd36e07ad5a4e8a68c9a6e2430543 (commit)
       via  e4a17a0630a6460090c5cdb562e02ba992a74fa8 (commit)
       via  954143a2748110c720d28df49159ed4f0bc1a1a2 (commit)
       via  21bac503aa78b1d0cbb6993edc083fbc508dad16 (commit)
       via  ee826c177bef06f22cdbbf82044085972bfd8737 (commit)
       via  4111989bb81641ee36fa94bf5cb181aa18f5477f (commit)
       via  8cfa0f76baf92f82bf2865b3557c0a2094e81cb4 (commit)
       via  bdebd1afa4bf82120c66d9ee8d8cab500ab0b606 (commit)
       via  68653f1c822916ceade94511168f87adff74c235 (commit)
       via  451086b203ef3e4611487630225a7650ad9322e7 (commit)
       via  c0c7b21ab57bb9445329fed9e1451c534aab6a67 (commit)
       via  59add6ec0f7e96ee81a7b9970228b8f795b01997 (commit)
       via  f6463fa6e74a32e3fb28f150247e11d0fe073782 (commit)
       via  1b421982a6fcadebc72d3d6ee7a4e34eec61a25d (commit)
       via  45630ca90e823247c429f82b338244a9bba9baf4 (commit)
       via  36c6035855db0ae87a64a0d169e0230d936e3e64 (commit)
       via  5ece3fe5e40efbcf7d727650475c35850624cfaf (commit)
       via  d88becea33630677dbb5123cd72fa8695512311a (commit)
       via  171088e69ff96a2e242cfdf98e8d1f0415d4c172 (commit)
       via  568a8cc472f3207b44b92428e7ac40338d9ede37 (commit)
       via  9a8667331d9a7179331516e7bb1f3aa942bf8218 (commit)
       via  4c98f3dc47545794daccd4978103f6b98236ad82 (commit)
       via  2dfa0983e4680f321a3d4f1bd0d826abd88f455c (commit)
       via  ec8fed8c805b513ea15ad76eb380c639dba88548 (commit)
       via  ce3be84b9f772fda5f08947fec92764119989019 (commit)
       via  d60bb44c243f27053589b5501529b0001404373f (commit)
       via  92dcac243a4a2924bab85d1519a0c7a20853f9cc (commit)
       via  fb7c63f65c121b372b1ea23a823cb17afdcd1dfd (commit)
       via  2bd6dc4ac6ac61705517df297320fa79b308b9e3 (commit)
       via  58d6de47f6e189ff0b648b4f2f74e6d5df85d749 (commit)
       via  2ecb4add323e3c4ba56641d28e35dd79013ff9cf (commit)
       via  540c6a5f5b25d935a8193fd835c1ba83dba02fd5 (commit)
       via  507b231626a2e0289288f48b1e4613b569cdd8b2 (commit)
       via  ea8bdd6cb6894855f109b8d19ce104ae9a4b9cb5 (commit)
       via  4a7d63179ae732ede6bdc77c393a1cfd9b0b58ca (commit)
       via  ba9f03b99b6e1dd46d9b11eb1bac629789c8f94a (commit)
       via  6ea996c67dff319e332b465ed450ee50b97de4f7 (commit)
       via  bbc661e3c38f02b4a1fb50bd4e058a22150b0087 (commit)
       via  373a792a4706be2619dd1d1820f949858620bc77 (commit)
       via  f9b1950752ff1d3041d776a5d50ec2d0ddb8065a (commit)
       via  d63056e7cff35f58898a9bdc8d5cad589689590c (commit)
       via  fe8f3314300936f71cc89535ecd3f0f3cad3804c (commit)
       via  b4ae924f504e9749989059a14e6a5dc830c99e81 (commit)
       via  20871297d2aaae57acb79e987ff80a9020d608d1 (commit)
       via  2384bcf387e93435658ec1ab92addbf28c9ab640 (commit)
       via  1d314b2544b8af8a936c90e00a0dbbb605410952 (commit)
       via  2bb551be853647c25005d1ab167e17ada7a5bfc5 (commit)
       via  e3c81bd07046903b4b3bff8325024aafcdb35cba (commit)
       via  9001f1db99dfff10957dc2a971e7466a496f0f2f (commit)
       via  616fb3be8c0b3c266eaf0aa4ae399918fc7992ef (commit)
       via  7dd0238dbd4ed086ca7217ec50d8f0a5be3179f3 (commit)
       via  7a9a19d6431df02d48a7bc9de44f08d9450d3a37 (commit)
       via  d72e84456e23ac19c2c12a186ba429cd2e4985cd (commit)
       via  deefb84c32a289f8deda6550518a48b01a6032c0 (commit)
       via  83f8d6de769a33f51b83cd81efe178db162e95e1 (commit)
       via  db9e3c398b854c83a65eb227ab9ff40dfae1145b (commit)
       via  77030a4789285a3f08fbdd9621a384a9e008f4a8 (commit)
       via  a030033e5a53dd18157509c6c101340688d16011 (commit)
       via  13e236a3d647d15858b061c7d96288bf7407e090 (commit)
       via  a7fe0d5982813f092f8a497d350620c02b995649 (commit)
       via  485e0ba7f7fe11e4d28e3eec2be835157521a6e9 (commit)
       via  6a55aa002c8f3b701dbb8291cd9a8e21534c6974 (commit)
       via  7cdda20613f7ed7b18e7fe210ae0f6a87054dbf3 (commit)
       via  745ebcec892cb27feec663de9218ae3647c7b8a5 (commit)
       via  1e702fae4c9adbd7134a739dee28c868a15f0b3e (commit)
       via  44bd4bc6dc7df56905071933a542e00e91f84837 (commit)
       via  006d0fab3f44ec9caa2b23da3866bbbd841cd5d3 (commit)
       via  68da925f226966a2760a193e9f9a3cdbdfcfacec (commit)
       via  09e8c50958a1fca313c2be427c2991c39798f90f (commit)
       via  d1b580f1780e5ebdbbf6fe8655cc923fbd5c02de (commit)
       via  98e74ad62b23ce33f66e3841431511136bc1c2f8 (commit)
       via  0fe4f0151ae7a994aaf305e7985d4ba9f992e482 (commit)
       via  9df1f04f8b1f7091ab32dcd56fb6e47e3e96d5a7 (commit)
       via  691c232b2655673ac352beafc0bfba4bc966f8f8 (commit)
       via  6ad78d124740f1ea18f6f93721ec6f152364e878 (commit)
       via  5253640054d48f7816aa00c803f5bc593c0c12c1 (commit)
       via  ce052cd92cd128ea3db5a8f154bd151956c2920c (commit)
       via  6dfeded7b6f2f78a2d45fa54543a5962bdc6c035 (commit)
       via  9bbf7837ed869bfa42849f433367b0471bf7bc58 (commit)
       via  810c79d6d9b8efbc12ec8e1ad727cf002f2dedc6 (commit)
       via  c74d3b7f393f3934bae22fc9d3a4a49e2211aadb (commit)
       via  ed6fc7857e3fe7d64f19a0bed27226964009f095 (commit)
       via  e074df43e95dc002374de30503ba44e203b04788 (commit)
       via  b06a3e2ba1febb9e34458c5106f8d1629a191d5f (commit)
       via  56af86bdab9c9700a13cc7d622653d34cbaa72f3 (commit)
       via  4cbf309be8a302afe3bc041da11c24b593464157 (commit)
       via  b3bcd825cfb9c19a62a7db4d12717e85aca0b1e8 (commit)
       via  3f5a0900a568436b011fc14b628b71bb130ae5f7 (commit)
       via  6df7102965c6afdec6f621175f9e91a56ee42a67 (commit)
       via  81613a741bcc9cbe909c814fab9ca99c1a1fc2fd (commit)
       via  cc004ec0ff327ca300cde89ffc252a9b1c588bec (commit)
       via  c454dfae8988337bd10bfe0551ee62a267049dfe (commit)
       via  afde75c1fe9ab3fa35acdf1a3b5f80ec389e1190 (commit)
       via  5de7909a21a077238567b64e489ed5345824b2a0 (commit)
       via  b4a1bc9ba28398dbd5fdbe4ee4f118a2faf59efa (commit)
       via  3ce7b09732207eac03998fa5e267672760e475c9 (commit)
       via  d9f4f26b0f2c73eddd07b2a4368ae1b238944b80 (commit)
       via  59c8ea50e972e7753c96f6bcf46fec48e694daa2 (commit)
       via  0f7dd030eb47912112b8774424a62c5561af16a1 (commit)
       via  fb441884baa9994093ed380aded84e707c3d34b5 (commit)
       via  6f5ca0bd47ff6a9b1670f38d6a68a1a7b1a01a5c (commit)
       via  ee552335b8177318be98e6a4c5d941aa41091a2f (commit)
       via  edbcbf0ab15f140b96efab5fae808b35e705cf67 (commit)
       via  c4131b7a0c4a6d666a35847f8cce3d099b7a9949 (commit)
       via  f3e53fe5cba59946ddcf24be423eece1ab596769 (commit)
       via  a51d6b87331f0fc991b9926a9101e081668ebbcb (commit)
       via  e0215095818d30e80b59e99689f2cf0dfbbae841 (commit)
       via  10cfb9ccd5b2eb489b14804e0ea9a73c80e697e6 (commit)
       via  acb5dff4449286422f23a7d5867b3bd792c888e5 (commit)
       via  253d1fc351fffc8a0b1d325044854a2defdd7223 (commit)
       via  d7834356a301b162fb9757427359d0dbac95cecf (commit)
       via  004afad6ea3fba7c8dd7730428b50fd770daec66 (commit)
       via  f20be125d667bceea0d940fc5fabf87b2eef86cd (commit)
       via  fcc707041d663b98c1992cdd1402cc183155d3c0 (commit)
       via  da5d5926cb26ca8dbdae119c03687cd3415f6638 (commit)
       via  0314c7bb66b85775dea73c95463eed88e9e286c3 (commit)
       via  b8cecbbd905c10d28bcb905def7160d9e406dac4 (commit)
       via  7a31e95e63013a298b449573cc5336bcd64a0419 (commit)
       via  e18a678b62d03729f065c40650d7183e2f260b22 (commit)
       via  1d1a87939a010bd16ed23cd817261e9a655bf98f (commit)
       via  c6948a6df9aeedd3753bc4c5e3a553088cd98f63 (commit)
       via  db0371fc9e5c7a85ab524ab7bc0b8169b9ba0486 (commit)
       via  e906efc3747f052128eef50bed0107a0d53546c8 (commit)
       via  d86a9dceaddf5a2cee44170e6e677f492df5e0ea (commit)
       via  4c2732cbf0bb7384ed61ab3604855f143a0c6c5d (commit)
       via  aaffb9c83c0fe59d9c7d590c5bea559ed8876269 (commit)
       via  e8a22472e58bfc7df4a661d665152fe4d70454a6 (commit)
       via  2c22d334a05ec1e77299a6c55252f1d1c33082af (commit)
       via  8a24b9066537caf373d0cfc11dca855eb6c3e4d9 (commit)
       via  7275c59de54593d3baca81345226dda2d3a19c30 (commit)
       via  bcf37a11b08922d69d02fa2ea1b280b2fa2c21e0 (commit)
       via  a142fa6302e1e0ea2ad1c9faf59d6a70a53a6489 (commit)
       via  ae8748f77a0261623216b1a11f9d979f555fe892 (commit)
       via  d0d5a67123b8009e89e84515eee4f93b37ec8497 (commit)
       via  a9a976d2a5871f1501018d697d3afd299ceec5da (commit)
       via  df9a8f921f0d20bd70c519218335357297bffa7d (commit)
       via  e95625332a20fb50afe43da2db0cab507efe8ebe (commit)
       via  28cad73dff9dae43a38ad7dafbee406c690fb77c (commit)
       via  4de3a5bdf367d87247cb9138f8929ab4798f014e (commit)
       via  aa108cc824539a1d32a4aa2f46f9e58171074a9e (commit)
       via  691328d91b4c4d15ace467ca47a3c987a9fb52b9 (commit)
       via  c06463cf96ea7401325a208af8ba457e661d1cec (commit)
       via  c074f6e0b72c3facf6b325b17dea1ca13a2788cc (commit)
       via  daa1d6dd07292142d3dec5928583b0ab1da89adf (commit)
       via  e7b4337aeaa760947e8e7906e64077ad7aaadc66 (commit)
       via  0b235902f38d611606d44661506f32baf266fdda (commit)
       via  c19a295eb4125b4d2a391de65972271002412258 (commit)
       via  9261da8717a433cf20218af08d3642fbeffb7d4b (commit)
       via  d4078d52343247b07c47370b497927a3a47a4f9a (commit)
       via  1aa728ddf691657611680385c920e3a7bd5fee12 (commit)
       via  1768e822df82943f075ebed023b72d225b3b0216 (commit)
       via  326885a3f98c49a848a67dc48db693b8bcc7b508 (commit)
       via  3e0a0e157bc2a1ca7ad9efb566755ec61eedd180 (commit)
       via  93a7f7d1495795b731242e270b6dc76b1ad6b0dc (commit)
       via  87e410c0061df72fe69fb47c7456ae54c609b219 (commit)
       via  1ddc6158f7544c95742757654863379fff847771 (commit)
       via  0f787178301c7cbf59fc7c516ebe920a33e22429 (commit)
       via  9b6993b6f6507fab1bc8956f727cca60c8c9243a (commit)
       via  7bda7762ab9243404bbd0964908b3365cd052969 (commit)
       via  7cf7ec751e4f776dbb60cd290cea4fb217173cdb (commit)
       via  d5ded106a85afaf695e59941bd382bca4811fe46 (commit)
       via  c4ef641d07c7ddfd6b86d6b5ae944ab9a30d6990 (commit)
       via  e443a325b31edefe9cd4da71e10497db6544468c (commit)
       via  cddcafd790288f5e666198effa142132b6fc43fa (commit)
       via  ab5085e81007711f9d18ed77f3d78f51cf37545c (commit)
       via  5e621bce015d2847104303fba574989fdf0399e0 (commit)
       via  7d5c3d56743fb696405f509663b3e1558fa72e25 (commit)
       via  990247bfd2248be5ae4293928101eec87e1997e9 (commit)
       via  e9e36557849ba6b650e503841596bd31034c1936 (commit)
       via  39a0a5c65d0802f40ab428474b1e6d981a91fbce (commit)
       via  0c9db8bbeb7187218a5b47d82df18e38128d06a3 (commit)
       via  9882d600d0bbbc115671b12646e690ccddbf5348 (commit)
       via  59b545e90d30444a97c8e925569d240c819d42b4 (commit)
       via  7e89c625c5d12b5816c857d0c0910922f8803f82 (commit)
       via  b9f87e9332895be6915e2f2960a2e921375e8e7f (commit)
       via  978ae99ac4aa211ba4ba960f56bb6cdd84b648ae (commit)
       via  2e60562cfda15fad37550ce5996e942084131d1c (commit)
       via  2f49e3eb0ddf31d601184b516b7f44ab4ea6eece (commit)
       via  d71b7da05d3e1a82047e35c2720c759bdc0fb44f (commit)
       via  a577b387b7e5c9c8afd371767fccc85009e84485 (commit)
       via  8e82cd7374cda9ef55f88504a94d31b06d7e1bd4 (commit)
       via  1351cb42c92cd415003adf6234d96507c8a6d2db (commit)
       via  575bd3ec2fe918257cb448eee8ebbff269d85431 (commit)
       via  51a7361aef92b8c6caad857ed09f0bea0f210db6 (commit)
       via  17a87c6bb9d16e992fadd47b11b3eb26af54ac69 (commit)
       via  2cc500af0929c1f268aeb6f8480bc428af70f4c4 (commit)
       via  e021b84f7fc20b3e3927093ed87e9c873d33a443 (commit)
       via  c46b0bc28c22f2ae4b46c592f450e745774846d4 (commit)
       via  7740b9810bc093a9083e8c3404afc627c8b78242 (commit)
       via  b51f0cfcedc2499aa1c0b85aaebf2fecf244c291 (commit)
       via  69eb1a250699f481427c2d12abf14314fee9e6eb (commit)
       via  62432e71ef943744fd4ca9ce216da1b0a7250573 (commit)
       via  005c77dfe53b54cef92ce51d91f615eb9c2769c4 (commit)
       via  ecffd3a7f26c9a1590994bb176494ed4f4ca7a64 (commit)
       via  ce3bc8504d765ecc9b453398efb18662bd4f277a (commit)
       via  94fc6d8d303053c47064c9408947cd49a8e11975 (commit)
       via  0edf51aca0ea2d75ed9d96fb612c1005965ec64f (commit)
       via  ba292defc14029971d5e9043881ddb98c994cfdb (commit)
       via  c5cf3cc081042fec0e2baea7cdf7f22a8a84664a (commit)
       via  779e145d8f15ad9975f6ca689e6a595ea0a3de4b (commit)
       via  adcbbb141bdb09a6fd999f3369e15c2881f843ba (commit)
       via  80014655d76e758868e8e1ed36472be9a606eb2a (commit)
       via  959dc163810ac286e01d0163624f5bbad5b82c55 (commit)
       via  1d74428fb7a817790c397338db92d102e2113e1c (commit)
       via  d5e24e94bbd581098e460fc3a0b437478340c876 (commit)
       via  4cd96de7e7d4ac12c38b45efe7b3ee0ed331d3b9 (commit)
       via  914fe9bc05003defeff70acb84a52e86fb9ced4c (commit)
       via  b22882ae78f0e5d38d4b6ace0725bf0ae5bc4803 (commit)
       via  f83a47d3c260982be4918a3d9f5d0b480503b131 (commit)
       via  8c84a6865c4b09eccf41c9d2e91a030c941bffab (commit)
       via  c6ca831b3f171da96fad75c21dffbd2bed71e297 (commit)
       via  8ce8e05a403440e7f2323e9d43dca08be1cf8a94 (commit)
       via  a9ddfa91f81e00400f04548e71ab9519892a6dea (commit)
       via  04749187843604f51ddcab4f53811dac9a9ed8a0 (commit)
       via  414b25d4bfa89e0609cd3c8c3a6e610681f4c929 (commit)
       via  f57e8133a7af31a59578ac2cd50dd20418cb8fbc (commit)
       via  85a14b1daffb3a20e9e510b73d25c71ba95cc350 (commit)
       via  774a56a8beeef3a73258910b12cace20443a1bcb (commit)
       via  89bd1bf64a6d745f4276fce3ee8fa4e050736ff1 (commit)
       via  f429202995ebb0dbc86d41c6d707815186832063 (commit)
       via  f14bc0502c3c4d2ffd609b110771ca1fa752b68e (commit)
       via  f75d5bd488669426794d086b80568ef0a7a4afe6 (commit)
       via  d719b47c4131e2120305cee60395c0a88f5aca25 (commit)
       via  c7db1351d3b1c25bfc31ed9e7b6b491e6bcb1555 (commit)
       via  ac15a86eb62832cc22533bc33b802ea297666ad5 (commit)
       via  0af72968bfd192fa418551ae75def455adcfbb4b (commit)
       via  977f822d94c59bfd9d56373404291fc85218b1d6 (commit)
       via  d00042b03e1f85cd1d8ea8340d5ac72222e5123e (commit)
       via  0081ce40b832f4c5abaeb0316736d772aec3f08d (commit)
       via  f03688da19c21b4d46761cc4ed9da981cebe43c1 (commit)
       via  eb8ba927115b091bb407cbc29ad2d07dfed318f1 (commit)
       via  b19a36e30d0d3829c68f2e0300ea1487da242af8 (commit)
       via  12b3473393fb7a471fc7d928476b0ba66da145e9 (commit)
       via  cfd1d9e142fa2fd8b21f74de0e4a0109e0a04439 (commit)
       via  67b352b3f7cf736c9aa7c1332aa7814911556ad5 (commit)
       via  822a00aee0d7feb845e28dad7dccb552d10d83db (commit)
       via  c293f639684d2c6625b7395c995aa813eafa5fa4 (commit)
       via  00686a614cca93f007335d01c06d78cfd212d973 (commit)
       via  5951ef6faaffcff62d9a9963260a932666e3decb (commit)
       via  a36be891057f7a2505db032768264c79f37f05e7 (commit)
       via  23b1e8bb169e058dfb11b826b1b59d606d64ce20 (commit)
       via  f82dc7b09f470f79ed2bf099216fa64c76528d3b (commit)
       via  a53c7d7c450de09ceb04b47cb59450225827bd51 (commit)
       via  5b7dee0548f068e626c0bf5d116fc506d2af92a0 (commit)
       via  7990857c32cbb49f4bedf805f86c1b718b3a70d0 (commit)
       via  a03d7d9aae8ac258d266c66c62c63e03ff5d2558 (commit)
       via  5d6fde4aa0d2a93945276dd722be48e05da72faf (commit)
       via  d14895917e4841ee53c46f7ab3f46c3f19489069 (commit)
       via  eb5023d2a38e0862e2d9a5f1ca4a3788fc131405 (commit)
       via  1aa26c98d1b827a80bad8abd7f8bb25c26db72b7 (commit)
       via  f6a1807c25d85a0ca762bfa276ebac4a3430e7c7 (commit)
       via  20483389cb90e4f46486be925b896c8a0438191c (commit)
       via  4102716ab6f3cfaa979151029c2859701dfe2ac6 (commit)
       via  8975d286a6de827a02b073de32570602cd9cffbc (commit)
       via  65e4595c21bf9c01fb0b7da61577ae8a79d29c30 (commit)
       via  19c8c07e6e1601180f85f7aad145f00112f3f8a4 (commit)
       via  87090907f39983b744749017cdac3fb957d8d0c0 (commit)
       via  2808941eebec54dc7c4981f5a2a0e149d452b8ca (commit)
       via  10b192574ca253331298bbc4b05ef70d2cb927d1 (commit)
       via  9351dbcc88ccdd6aa83d72f432f19a76c031124b (commit)
       via  de06b256b36f6428c5d914266c4e91c25c69ded5 (commit)
       via  d4867b8dd18ddbee0b30040f569eeac99964343f (commit)
       via  b5347a6b22c2d82ffa57c8302c81ee0f25b413a1 (commit)
       via  848cfc635084c5baccb275ed4995032d3ada2d59 (commit)
       via  46b961d69aff3a2e4d1cb7f3d0910bfcc66d1e19 (commit)
       via  52357dbe51bd015119a798a4f8e7244a3e1efda4 (commit)
       via  97153d16eb9ecb7281ed9dc76783091964e769dd (commit)
       via  56083614ae0e8c5177786528e85d348686bf9bc2 (commit)
       via  c9d7e29600f7a80094bcda2c3bd87d8f07d813e9 (commit)
       via  2b6bcb84a17fc98ea0ea87df65e6a77829857ecd (commit)
       via  cc6d6b14603924a4ef2d86dfaf758447cca6a7ff (commit)
       via  69642fb8f55cb4741f977d3fbaacd5d12d742625 (commit)
       via  3027ed2010e5e27ef6e8ba519b789269100f442e (commit)
       via  fc33ec0a47dce3e94fa7179d4d28d7fd050a258d (commit)
       via  86257c05755c8adbb19ce684546b718dd48a5ef8 (commit)
       via  5f13949918d125f851bd2ba8ab092c301835d3ac (commit)
       via  9a98be99edd71e540bd65631dcbd3d766f93056e (commit)
       via  cce2a00af57ef823abeaeff787eff35f43dfb093 (commit)
       via  7e1e150e056d0dcf5a58b2a8036f47c2e5dac820 (commit)
       via  15428e5a9c1bb01f5e7a04979c17ec5f1de9d1db (commit)
       via  ac9fd0a240cbfa8c448cb01bb69ac92313eb7e56 (commit)
       via  ce0544bd0852415891cb31e0c1b7d0ba0b3d19f3 (commit)
       via  dba1e2c7884b5bc68f945fd5d2dd500f9a258c6b (commit)
       via  bc281e8b48c92102d3c64318e07598c8e96e493c (commit)
       via  82667b0cdd6592053f5b2f4cfa1cbd0ec92db0b2 (commit)
       via  71b0ae9ddbcbf4093900ff879e2e1c82be89867f (commit)
       via  1b96c2563342098e05ac4b240c66e60222249cf4 (commit)
       via  ff14da4f9b706a47f152491eae60586b75430c6e (commit)
       via  d23cde8c4285cf55b007b300123c41fa852d38d9 (commit)
       via  885d7987eefb0b8b694626b0831ed93123fb8d8d (commit)
       via  07cd1647921e0e94432cecb2f7a5413cd8f3884e (commit)
       via  82348d8d9d266d91e570c4ae8d8f1afd3315178a (commit)
       via  ee2a86bd4c1472e606b3d59ef5c4392b61d7ab48 (commit)
       via  efea6557fd364ee42c84c08df28efa9797f1c9c8 (commit)
       via  0e662967ac5a6c8e187725828cd20b826ca00000 (commit)
       via  dc979c6874916221df10de3557db0d1b4a19d221 (commit)
       via  925045f2ad19d5dccb7dde77530ea16ea7b6341b (commit)
       via  ba80991049e1e361d2b1de08160c91e5bd38b728 (commit)
       via  faa90e91384af409419363aca539709e2985708b (commit)
       via  1feeca7c2209819dd181f1fbaaa75026d3e38aa2 (commit)
       via  b2cab7978ff20eff1d4fcb4cf60fc8a4421fc24c (commit)
       via  d7713e5c5033ccb0b51769d7f28d91619655b24d (commit)
       via  928dacfdf443393618edf7124a46c599bd760784 (commit)
       via  b34e7172b5f663faf3add7f6e72a3e2d8ffe680a (commit)
       via  7fbc6a734b2a9e33100e57cbea0ce1d20cdf4491 (commit)
       via  9f5c36321d6843ba5b2a0e9e6c10c3ffee7b14fc (commit)
       via  fea1f88cd0bb5bdeefc6048b122da4328635163d (commit)
       via  54ef8963e504e22dcf29405412a95100a210efe5 (commit)
       via  4db53f3593e24b80a33b608432ef463acbec295e (commit)
       via  0b98878ed8a185cbc3b78c860019416bfed317bb (commit)
       via  009d45dfbb20a54ea402e7e8f18bc2d253f41ad6 (commit)
       via  f1d52ff7171da920acc7583fa427a95386312908 (commit)
       via  98953e35ee95489f01fbe87e55fe91d9571fcb48 (commit)
       via  f33ffa77fdcc3e40ec42268ea09b67ac65982f1f (commit)
       via  ac08c1b86b979574678aa110f19fb744719def21 (commit)
       via  747d2952c78ee32acc485946d3922cfe899a4b48 (commit)
       via  f26298e3ae274ccea3d4bcef37f5ac85da383461 (commit)
       via  7489fa475c3f5963323a6b660e4544e48f45d37c (commit)
       via  f00712037fa4b4cbd0d677d998df3728c0c4d8fe (commit)
       via  dae8a2aabc0cc9c9f3794276676872014c5a58fa (commit)
       via  3cebb4e77088feb357b485aeeda26429f98dce9b (commit)
       via  96249117c97e625ec93d94939e9d75fad18ac2df (commit)
       via  dfc13c8130787ee07e2386773a221524ac6d802b (commit)
       via  6ee994f190d58df863c71389bf9f8edd38d8e3eb (commit)
       via  f240d7d1d55f4ae87bfd1acc9c07a90870f59a93 (commit)
       via  1c5a66507b7dc2990709308979354d8e62646a28 (commit)
       via  c5124556a1a8907a84bb2c2bd1912da0c0aaafcc (commit)
       via  19912ea4537e669f9c9ad1108b6f5453025738ef (commit)
       via  3702df52de21023d90052afdc54732d9ad285b39 (commit)
       via  e47f04584b00f6d7b5c8bf9e8ae6af9aaa6831fd (commit)
       via  823e0fcf308c7f3fc88ba48070e12bd995e75392 (commit)
       via  608d45610e9f499fb43d2e52eba461d489a7d45f (commit)
       via  da32354d05eb22cecdf9543f542636d44e503a20 (commit)
       via  c42eef08cd6cb28c898d46c2168c5c08684d5c36 (commit)
       via  e76dc86b0a01a54dab56cbf8552bd0c5fbb5b461 (commit)
       via  f17363ea38564867df555b6be9138d2eff28daa0 (commit)
       via  5fd94aa027828c50e63ae1073d9d6708e0a9c223 (commit)
       via  7b04ab1afedaf73b4492f9e0a9210dc4392ea068 (commit)
       via  16e52275c4c9e355cf4e448a5b17136f24324d7a (commit)
       via  61029d971895738ba353841d99f4ca07ecf792b7 (commit)
       via  4625b640b9b5892da7f35f165407ed3e850353d9 (commit)
       via  1c8043e5b50bd47d7734397a08d5015e3672b9ad (commit)
       via  9819295a58b8b40ca6d95c84f1f1de08fb0eb707 (commit)
       via  dc3b856b460ff380feb68cdff551f334e6db5a27 (commit)
       via  d2f96b7e1e3e4a5917ea73a56429fa645d8ede7c (commit)
       via  f4620596bd798f3c0e1d4b7738a5c4ca1730cf89 (commit)
       via  a01cd4ac5a68a1749593600c0f338620511cae2d (commit)
       via  e62e50f3143aa67bd60c2351ad61d7544f28d4ca (commit)
       via  be9d5fe994e6a086a951e432d56e7de2af3cfd09 (commit)
       via  11b8b873e7fd6722053aa224d20f29350bf2b298 (commit)
       via  b63b9aac20259f3612e23c7a3e977dcb48693ef1 (commit)
       via  14a0766224d50d1c4c409e883cf29515dafc25f0 (commit)
       via  b5fbd9c942b1080aa60a48ee23da60574d1fc22f (commit)
       via  d299036c6ac281d1d6c119c5fdbe603bed404851 (commit)
       via  e5d9f259dce621201a2c52b56b260f8de776ecc0 (commit)
       via  f773f9ac21221663bd093806374cab83abd2288d (commit)
       via  63f4617b5ab99d75e98e40760ff68bb1615a84e6 (commit)
       via  579fd2bf848e994ed6dcd8d1c3633f2fa62cbd28 (commit)
       via  25b02eeaa9acda461629d19c4c6c2b20b5850795 (commit)
       via  e89a3a1302cd3e95403c5c64edb126153852ff35 (commit)
       via  d9d0d1f6cb6c6210f293dcf5c181024d2df787f6 (commit)
       via  c8710633f9cad97adc038852319f1a7a22cebc44 (commit)
       via  c759e90e162192eda89c5046fa446891aac259c7 (commit)
       via  21850ab947dbdf98b1d89afc36d8bcfc6001592e (commit)
       via  9cc8edcca2ab13145a954b44101f7058142d4ac1 (commit)
       via  dd7e5d47df1e9af687cdc87c2d2595893eefec12 (commit)
       via  8907c6a5c71816483099683e0ddcaf11cf3a7912 (commit)
       via  0d2c284222839ff21401cecb7cb567cb0cc04127 (commit)
       via  06aeefc4787c82db7f5443651f099c5af47bd4d6 (commit)
       via  119442008b97f3b39d0ade075dd219a2f781e2a3 (commit)
       via  d42d232acb16847ea8ec775854469e3226cdfe17 (commit)
       via  34634d2ba1efba222403e8a210379d1573759939 (commit)
       via  0373b72ac00aaecb7745cf7fd129424994e2fab8 (commit)
       via  feae0b934e048b17830f49779b01c48136a5b2bf (commit)
       via  8f5f77f8e2819a66de774a4b7f5216ebc631434c (commit)
       via  ced9ddecf6b8f7777125b8d4d2ef1b24ccad34cd (commit)
       via  34cfc02f00196f9f5124172b10de5cc8fea1081f (commit)
       via  45dcf93cb43fbd2f52cd432e38a5c17ae2ded61f (commit)
       via  c18502d5a89af081b1cd4c4b1c112f9458056124 (commit)
       via  ee4916a2db7ff1217c0af65f03220583b80b4568 (commit)
       via  87a4f24037965ae88435ebe3f887750c500cbfde (commit)
       via  aa9497f4d2346e7a18cd07b9bf31dfb5832031bc (commit)
       via  7b0201a4f98ee1b1288ae3b074cd1007707b6b21 (commit)
       via  ba7bc1e14fcf1a223a9a42ede2e9cd7d290c8b61 (commit)
       via  c6ef5865b3fd8e5d5fb8c891467b3722fde4d685 (commit)
       via  589965360a98152e8c783e4736080e06a895feb0 (commit)
       via  cb86d16418ced44b148726104c5c8f9d36a3be49 (commit)
       via  f279d996354eded4defa219a393efa362e157406 (commit)
       via  69336de84b2ae1b5b6a59fa8d817daa1108cea27 (commit)
       via  e05a3418c9d6b3f70cdb387d1f30d8ba59733f02 (commit)
       via  12186e267fb75a77027dc046f78db6ace99b8571 (commit)
       via  c62810c526d75363ed4d668bbdb6b21a5a294a7b (commit)
       via  0710846d8d7a38079b9570aeec9abfb94341af79 (commit)
       via  9517f61cb8ad4f8074b5e6e33c663ca9ed581908 (commit)
       via  3da7e8747dcea9b45c8bc4c17b946be7d5ff9576 (commit)
       via  900a3c5828be90bfce2a7b8e2e6edc0d4509df6a (commit)
       via  d9e757fb15b711464cfc8ba344f2563f3e2b9195 (commit)
       via  517c31a58af1f7b97f308e77caeb8cbe9ef99cf1 (commit)
       via  4c485d0b112721d3a2b2939ab61db14b7608c98c (commit)
       via  be388eb699a8517595ea921082b5ded2d1450dcc (commit)
       via  bf5fbf4c58d67a25c68efea6608ec2b8e89c7597 (commit)
       via  aa7400d4aa132f50a982739e1e8b9752d418b97f (commit)
       via  0711c996f017cabe220dd291500bb1b202f21e1f (commit)
       via  9b2e89cabb6191db86f88ee717f7abc4171fa979 (commit)
       via  07e015d587c487ce1934144abe59010b8f588c81 (commit)
       via  253a3fad875abba510e13a3112b6176b9e272e84 (commit)
       via  566d284cd664a78255f5fbc8881ee8996f835960 (commit)
       via  8d8c3bc259f8b549a2fbace562afb0984cd427ba (commit)
       via  af698f41e199e4942d818accb0cc0ad7589785e8 (commit)
       via  6300d968db6e857e199cf8e4701988bf2f9136a2 (commit)
       via  49d5415d994ab0807daeaacf5e30f9186ca72ff5 (commit)
       via  6a204908cb3f11ba7635d5e0a97a196856fb5748 (commit)
       via  489f9a3bf2078969f746a47a49fdc17d94f898d3 (commit)
       via  7b55eb02488353672fad7160148a40e581cb5c80 (commit)
       via  67f6e4baa87b5555f3bc13919707a3f3180d57f4 (commit)
       via  c0a78a899ad3d96bcfe15715e957eebdb71ecca4 (commit)
       via  6ba745463f9f54496a2f9c2b1a407ab40844bbd4 (commit)
       via  18d0a74b6464ffbe036c41e706d3130a69a38313 (commit)
       via  ae1cf18d06bfc92ba1803ad8bb7c90be844f491e (commit)
       via  26e04c45efa440353cd75365c499fc06ba1eb4ea (commit)
       via  eff38a97dea5a54b7a9f3e1213cd5e8b2b15be37 (commit)
       via  42017c858f5e08f1544620342404904c36d12625 (commit)
       via  fafb108c231295b40b7b0d0ea86caff5031a0c30 (commit)
       via  136adbdab133d19bf900036b3786d5f709ab2082 (commit)
       via  6d842a64386a5c64a5136cadb4a1e646ee1901e5 (commit)
       via  9741148f1166694a65612ea27be4080dbf7194cc (commit)
       via  b4591042f81a9ec8157bc74d023f1fa5c91999e7 (commit)
       via  21b4324449c7091d36fc3e153d3e0f4ea3515278 (commit)
       via  834f8d0f752eda6b2baa5dffb48bc0d86de8c90a (commit)
       via  e108ea6f210bf93250ad4ea23ac3708e1478946e (commit)
       via  1f26ac530c0ca072ff0de69093d38c95b9d3c80a (commit)
       via  27a209e24883177391c382906dcd0104a54faf79 (commit)
       via  1c71878fcb9d5579383561cdaacd78b81fc28694 (commit)
       via  4d18d306085f15ff218dd7dca303aa53122aa2d3 (commit)
       via  71fb105407d496134f0cfcbea73eaea9991dbcf5 (commit)
       via  834d48869745039bbd874d76bcafb4ac6ce7a4e8 (commit)
       via  12114c5c973d70be91bfe946962e4373fa4d890a (commit)
       via  8820f1314ddcaea75e069f2a11bced9bd1b80ef8 (commit)
       via  c5825a1d48bb2def1c6113629e30de4ac9dd2b0a (commit)
       via  a0007d1c88df41e7796f89e24f7af5b40660fbf3 (commit)
       via  7e0ef7c21ad41f0e3047059fef61ddbefe143444 (commit)
       via  7cc84b1bfe00402ea12749c63c7e4d8cef5b2431 (commit)
       via  2cd7eb5d2c64c6a54350e6399f07fd4826933bff (commit)
       via  4f17845a927e33ad9655c3f711177e376bc10e44 (commit)
       via  84a16612dd45bcaca490715039b1bec235e0dfef (commit)
       via  d4dce83017319569f35e617dae47af9041166239 (commit)
       via  3b30727c4ae0b4febedb9795752352bf5154730a (commit)
       via  bf635ee41af43f357b285ab97f04f72b37e8fb64 (commit)
       via  525d9602da83a5d8ddbfc9ebda282209aa743a70 (commit)
       via  c6dc0f2d6f67d69d32e7f8c3c175d79f4b2ef430 (commit)
       via  85b53414c2c8f70e541447ee204e004693289956 (commit)
       via  6c3401b4a9fb79bdee7484e1e3c05758d1b0c0ca (commit)
       via  a5cf5c7b3a6ac9be60a8737f0e36a61897d32acd (commit)
       via  734cae300ccd13aacec1f32b283d4d21b5de8fb5 (commit)
       via  07708b4325680c4731f0d3dc24bca9da3c962d80 (commit)
       via  b4007e4b25d21ba3b693674ca19ead7d202b7de0 (commit)
       via  600d77cc8af4625a30dceda2033c4aadbbbe71ff (commit)
       via  3b1a604abf5709bfda7271fa94213f7d823de69d (commit)
       via  0caae46ef006c8322d489d6b140c0aee91928803 (commit)
       via  688d0a641d4fa7a018fb4f9e131ed1454c68dd15 (commit)
       via  c136060da6a43da5db7e45b6a32da83f0f7d0820 (commit)
       via  70d50df5bc495661463ff19885b9a4112270bafa (commit)
       via  d3ef96824420d7f089b28e6521790191e39949bf (commit)
      from  4b57a79735953705a82d8595a8ac541f7deb7a74 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email, so we list those
revisions in full below.

- Log -----------------------------------------------------------------
-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |  471 +++-
 Makefile.am                                        |    6 +-
 README                                             |  222 +-
 compatcheck/Makefile.am                            |    8 +
 compatcheck/README                                 |    5 +
 compatcheck/sqlite3-difftbl-check.py.in            |   60 +
 configure.ac                                       |  169 +-
 doc/Doxyfile                                       |    6 +-
 doc/guide/Makefile.am                              |    7 +-
 doc/guide/bind10-guide.html                        |  835 ++++-
 doc/guide/bind10-guide.txt                         | 1360 ++++++
 doc/guide/bind10-guide.xml                         | 1261 +++++-
 doc/guide/bind10-messages.html                     | 2322 +++++++++--
 doc/guide/bind10-messages.xml                      | 4379 +++++++++++++++++---
 ext/asio/asio/impl/error_code.ipp                  |    3 +
 src/bin/auth/Makefile.am                           |    7 +
 src/bin/auth/auth.spec.pre.in                      |   18 +
 src/bin/auth/auth_config.cc                        |   17 +-
 src/bin/auth/auth_log.h                            |    8 +-
 src/bin/auth/auth_messages.mes                     |   14 +-
 src/bin/auth/auth_srv.cc                           |   78 +-
 src/bin/auth/auth_srv.h                            |   26 +-
 src/bin/auth/b10-auth.8                            |   47 +-
 src/bin/auth/b10-auth.xml                          |   48 +-
 src/bin/auth/benchmarks/Makefile.am                |    7 +
 src/bin/auth/command.cc                            |   23 +-
 src/bin/auth/common.cc                             |    9 +-
 src/bin/auth/query.cc                              |  254 +-
 src/bin/auth/query.h                               |   99 +-
 src/bin/auth/spec_config.h.pre.in                  |   32 +-
 src/bin/auth/statistics.cc                         |   32 +-
 src/bin/auth/statistics.h                          |   22 +-
 src/bin/auth/tests/Makefile.am                     |    8 +
 src/bin/auth/tests/auth_srv_unittest.cc            |  165 +-
 src/bin/auth/tests/command_unittest.cc             |   37 +-
 src/bin/auth/tests/config_unittest.cc              |   46 +-
 src/bin/auth/tests/query_unittest.cc               |  766 ++++-
 src/bin/auth/tests/statistics_unittest.cc          |   74 +-
 src/bin/auth/tests/testdata/Makefile.am            |    2 +-
 src/bin/bind10/Makefile.am                         |   20 +-
 src/bin/bind10/TODO                                |    6 -
 src/bin/bind10/bind10.8                            |  228 +-
 src/bin/bind10/bind10.py.in                        | 1037 -----
 src/bin/bind10/bind10.xml                          |  255 ++-
 src/bin/bind10/bind10_messages.mes                 |  191 +-
 src/bin/bind10/bind10_src.py.in                    |  990 +++++
 src/bin/bind10/bob.spec                            |   84 +-
 src/bin/bind10/creatorapi.txt                      |  123 +
 src/bin/bind10/run_bind10.sh.in                    |    9 +-
 src/bin/bind10/tests/Makefile.am                   |    7 +-
 src/bin/bind10/tests/bind10_test.py.in             |  621 ++-
 src/bin/bindctl/Makefile.am                        |    2 +
 src/bin/bindctl/bindcmd.py                         |  152 +-
 src/bin/bindctl/bindctl_main.py.in                 |   22 +-
 src/bin/bindctl/run_bindctl.sh.in                  |    4 +-
 src/bin/bindctl/tests/Makefile.am                  |    4 +-
 src/bin/bindctl/tests/bindctl_test.py              |  126 +-
 src/bin/cfgmgr/b10-cfgmgr.py.in                    |    2 +-
 src/bin/cfgmgr/plugins/Makefile.am                 |   11 +-
 src/bin/cfgmgr/plugins/tests/Makefile.am           |    6 +-
 src/bin/cfgmgr/tests/Makefile.am                   |   10 +-
 src/bin/cmdctl/Makefile.am                         |   15 +-
 src/bin/cmdctl/cmdctl.py.in                        |  106 +-
 src/bin/cmdctl/cmdctl_messages.mes                 |    5 +-
 src/bin/cmdctl/run_b10-cmdctl.sh.in                |   10 +-
 src/bin/cmdctl/tests/Makefile.am                   |    4 +-
 src/bin/dhcp6/Makefile.am                          |   34 +-
 src/bin/dhcp6/b10-dhcp6.8                          |   29 +-
 src/bin/dhcp6/b10-dhcp6.xml                        |   98 +
 src/bin/dhcp6/dhcp6.h                              |  213 -
 src/bin/dhcp6/dhcp6_srv.cc                         |  231 +
 src/bin/dhcp6/dhcp6_srv.h                          |  156 +
 src/bin/dhcp6/iface_mgr.cc                         |  542 +++
 src/bin/dhcp6/iface_mgr.h                          |  229 +
 src/bin/dhcp6/interfaces.txt                       |   10 +
 src/bin/dhcp6/main.cc                              |   46 +-
 src/bin/dhcp6/tests/Makefile.am                    |   46 +-
 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc          |  148 +
 src/bin/dhcp6/tests/dhcp6_test.py                  |    2 +-
 src/bin/dhcp6/tests/dhcp6_unittests.cc             |   28 +
 src/bin/dhcp6/tests/iface_mgr_unittest.cc          |  367 ++
 src/bin/host/Makefile.am                           |    1 +
 src/bin/host/b10-host.1                            |    4 -
 src/bin/host/b10-host.xml                          |    5 -
 src/bin/loadzone/Makefile.am                       |    1 +
 src/bin/loadzone/run_loadzone.sh.in                |    4 +-
 src/bin/loadzone/tests/correct/Makefile.am         |    4 +-
 src/bin/loadzone/tests/correct/correct_test.sh.in  |    2 +-
 src/bin/loadzone/tests/error/Makefile.am           |    4 +-
 src/bin/loadzone/tests/error/error_test.sh.in      |    2 +-
 src/bin/msgq/Makefile.am                           |    2 +-
 src/bin/msgq/msgq.py.in                            |   33 +-
 src/bin/msgq/tests/Makefile.am                     |    4 +-
 src/bin/resolver/b10-resolver.8                    |   30 +-
 src/bin/resolver/b10-resolver.xml                  |   32 +-
 src/bin/resolver/resolver.cc                       |    5 +-
 src/bin/resolver/resolver_log.h                    |   12 +-
 src/bin/resolver/resolver_messages.mes             |    4 +-
 src/bin/resolver/tests/Makefile.am                 |    2 -
 src/bin/resolver/tests/resolver_config_unittest.cc |    3 +-
 src/bin/sockcreator/README                         |    2 +-
 src/bin/stats/Makefile.am                          |   29 +-
 src/bin/stats/b10-stats-httpd.8                    |    6 +-
 src/bin/stats/b10-stats-httpd.xml                  |   10 +-
 src/bin/stats/b10-stats.8                          |  103 +-
 src/bin/stats/b10-stats.xml                        |  130 +-
 src/bin/stats/stats-httpd-xml.tpl                  |   23 +-
 src/bin/stats/stats-httpd-xsd.tpl                  |   38 +-
 src/bin/stats/stats-httpd-xsl.tpl                  |   26 +-
 src/bin/stats/stats-schema.spec                    |   87 -
 src/bin/stats/stats.py.in                          |  598 ++--
 src/bin/stats/stats.spec                           |  106 +-
 src/bin/stats/stats_httpd.py.in                    |  648 +++-
 src/bin/stats/stats_httpd_messages.mes             |   18 +-
 src/bin/stats/stats_messages.mes                   |   21 +-
 src/bin/stats/tests/Makefile.am                    |   13 +-
 src/bin/stats/tests/b10-stats-httpd_test.py        | 1441 +++++--
 src/bin/stats/tests/b10-stats_test.py              | 1318 +++---
 src/bin/stats/tests/fake_select.py                 |   43 -
 src/bin/stats/tests/fake_socket.py                 |   70 -
 src/bin/stats/tests/fake_time.py                   |   47 -
 src/bin/stats/tests/http/Makefile.am               |    6 -
 src/bin/stats/tests/http/server.py                 |   96 -
 src/bin/stats/tests/isc/Makefile.am                |    8 -
 src/bin/stats/tests/isc/cc/Makefile.am             |    7 -
 src/bin/stats/tests/isc/cc/__init__.py             |    1 -
 src/bin/stats/tests/isc/cc/session.py              |  148 -
 src/bin/stats/tests/isc/config/Makefile.am         |    7 -
 src/bin/stats/tests/isc/config/__init__.py         |    1 -
 src/bin/stats/tests/isc/config/ccsession.py        |  160 -
 src/bin/stats/tests/isc/log/Makefile.am            |    7 -
 src/bin/stats/tests/isc/log/__init__.py            |   33 -
 src/bin/stats/tests/isc/util/Makefile.am           |    7 -
 src/bin/stats/tests/isc/util/process.py            |   21 -
 src/bin/stats/tests/test_utils.py                  |  424 ++
 src/bin/stats/tests/testdata/Makefile.am           |    1 -
 src/bin/stats/tests/testdata/stats_test.spec       |   19 -
 src/bin/tests/Makefile.am                          |    8 +-
 src/bin/tests/process_rename_test.py.in            |    9 +-
 src/bin/xfrin/Makefile.am                          |   15 +-
 src/bin/xfrin/b10-xfrin.8                          |   37 +-
 src/bin/xfrin/b10-xfrin.xml                        |   27 +-
 src/bin/xfrin/tests/Makefile.am                    |   12 +-
 src/bin/xfrin/tests/testdata/Makefile.am           |    2 +
 src/bin/xfrin/tests/testdata/example.com           |   17 +
 src/bin/xfrin/tests/testdata/example.com.sqlite3   |  Bin 0 -> 12288 bytes
 src/bin/xfrin/tests/xfrin_test.py                  | 1910 ++++++++-
 src/bin/xfrin/xfrin.py.in                          |  930 ++++-
 src/bin/xfrin/xfrin.spec                           |    5 +
 src/bin/xfrin/xfrin_messages.mes                   |  140 +-
 src/bin/xfrout/Makefile.am                         |   15 +-
 src/bin/xfrout/b10-xfrout.8                        |   13 +
 src/bin/xfrout/b10-xfrout.xml                      |   33 +
 src/bin/xfrout/tests/Makefile.am                   |   18 +-
 src/bin/xfrout/tests/testdata/creatediff.py        |   58 +
 src/bin/xfrout/tests/testdata/example.com          |    6 +
 src/bin/xfrout/tests/testdata/test.sqlite3         |  Bin 0 -> 12288 bytes
 src/bin/xfrout/tests/xfrout_test.py.in             | 1083 ++++-
 src/bin/xfrout/xfrout.py.in                        |  680 +++-
 src/bin/xfrout/xfrout.spec.pre.in                  |   41 +-
 src/bin/xfrout/xfrout_messages.mes                 |  133 +-
 src/bin/zonemgr/Makefile.am                        |   16 +-
 src/bin/zonemgr/tests/Makefile.am                  |    4 +-
 src/bin/zonemgr/tests/zonemgr_test.py              |   65 +-
 src/bin/zonemgr/zonemgr.py.in                      |  124 +-
 src/bin/zonemgr/zonemgr_messages.mes               |  145 +
 src/cppcheck-suppress.lst                          |    3 +-
 src/lib/Makefile.am                                |    6 +-
 src/lib/acl/Makefile.am                            |    2 +-
 src/lib/acl/dns.cc                                 |   25 +-
 src/lib/acl/dns.h                                  |   26 +-
 src/lib/acl/dnsname_check.h                        |   83 +
 src/lib/acl/loader.h                               |    6 +-
 src/lib/acl/tests/Makefile.am                      |    2 +
 src/lib/acl/tests/dns_test.cc                      |   86 +-
 src/lib/acl/tests/dnsname_check_unittest.cc        |   59 +
 src/lib/asiodns/asiodns_messages.mes               |   10 +-
 src/lib/asiodns/io_fetch.cc                        |   12 +-
 src/lib/asiolink/Makefile.am                       |   12 +-
 src/lib/asiolink/dummy_io_cb.h                     |    7 +-
 src/lib/asiolink/io_address.cc                     |   38 +-
 src/lib/asiolink/io_address.h                      |   42 +
 src/lib/asiolink/io_asio_socket.h                  |   20 +-
 src/lib/asiolink/tests/io_address_unittest.cc      |   38 +
 src/lib/asiolink/tests/io_endpoint_unittest.cc     |    2 +-
 src/lib/bench/Makefile.am                          |    2 +-
 src/lib/bench/tests/Makefile.am                    |    1 +
 src/lib/cache/cache_messages.mes                   |    4 +-
 src/lib/cache/logger.h                             |   17 +-
 src/lib/cache/message_cache.h                      |    2 +
 src/lib/cache/resolver_cache.h                     |    4 +-
 src/lib/cache/rrset_entry.h                        |    4 +-
 src/lib/cache/tests/Makefile.am                    |    1 +
 src/lib/cc/cc_messages.mes                         |    6 +-
 src/lib/cc/data.cc                                 |    2 +
 src/lib/cc/logger.h                                |   25 +-
 src/lib/cc/session.cc                              |    5 +-
 src/lib/config/ccsession.cc                        |  129 +-
 src/lib/config/ccsession.h                         |    4 +-
 src/lib/config/config_log.h                        |    5 +-
 src/lib/config/config_messages.mes                 |   25 +
 src/lib/config/module_spec.cc                      |  115 +-
 src/lib/config/module_spec.h                       |   23 +-
 src/lib/config/tests/Makefile.am                   |    2 +-
 src/lib/config/tests/ccsession_unittests.cc        |   62 +-
 src/lib/config/tests/module_spec_unittests.cc      |  167 +-
 src/lib/config/tests/testdata/Makefile.am          |   12 +
 src/lib/config/tests/testdata/data32_1.data        |    3 +
 src/lib/config/tests/testdata/data32_2.data        |    3 +
 src/lib/config/tests/testdata/data32_3.data        |    3 +
 src/lib/config/tests/testdata/data33_1.data        |    7 +
 src/lib/config/tests/testdata/data33_2.data        |    7 +
 src/lib/config/tests/testdata/spec2.spec           |   11 +
 src/lib/config/tests/testdata/spec32.spec          |   40 +
 src/lib/config/tests/testdata/spec33.spec          |   50 +
 src/lib/config/tests/testdata/spec34.spec          |   14 +
 src/lib/config/tests/testdata/spec35.spec          |   15 +
 src/lib/config/tests/testdata/spec36.spec          |   17 +
 src/lib/config/tests/testdata/spec37.spec          |    7 +
 src/lib/config/tests/testdata/spec38.spec          |   17 +
 src/lib/datasrc/Makefile.am                        |   27 +-
 src/lib/datasrc/client.h                           |  371 ++
 src/lib/datasrc/data_source.h                      |   24 +-
 src/lib/datasrc/database.cc                        | 1174 ++++++
 src/lib/datasrc/database.h                         |  959 +++++
 src/lib/datasrc/datasrc_config.h.pre.in            |   31 +
 src/lib/datasrc/datasrc_messages.mes               |  161 +
 src/lib/datasrc/factory.cc                         |  144 +
 src/lib/datasrc/factory.h                          |  179 +
 src/lib/datasrc/iterator.h                         |  105 +
 src/lib/datasrc/logger.h                           |   18 +-
 src/lib/datasrc/memory_datasrc.cc                  |  417 ++-
 src/lib/datasrc/memory_datasrc.h                   |  229 +-
 src/lib/datasrc/rbtree.h                           |   12 +-
 src/lib/datasrc/sqlite3_accessor.cc                | 1170 ++++++
 src/lib/datasrc/sqlite3_accessor.h                 |  283 ++
 src/lib/datasrc/sqlite3_datasrc.cc                 |   95 +-
 src/lib/datasrc/static_datasrc.cc                  |    1 +
 src/lib/datasrc/tests/Makefile.am                  |   98 +-
 src/lib/datasrc/tests/cache_unittest.cc            |    6 +-
 src/lib/datasrc/tests/client_unittest.cc           |   59 +
 src/lib/datasrc/tests/database_unittest.cc         | 3260 +++++++++++++++
 src/lib/datasrc/tests/factory_unittest.cc          |  240 ++
 src/lib/datasrc/tests/memory_datasrc_unittest.cc   |  723 ++--
 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc | 1194 ++++++
 src/lib/datasrc/tests/static_unittest.cc           |    1 +
 src/lib/datasrc/tests/testdata/Makefile.am         |    1 +
 src/lib/datasrc/tests/testdata/brokendb.sqlite3    |  Bin 2048 -> 4096 bytes
 src/lib/datasrc/tests/testdata/diffs.sqlite3       |  Bin 0 -> 16384 bytes
 src/lib/datasrc/tests/testdata/diffs_table.sql     |  123 +
 src/lib/datasrc/tests/testdata/example.org.sqlite3 |  Bin 14336 -> 14336 bytes
 .../datasrc/tests/testdata/example2.com.sqlite3    |  Bin 11264 -> 14336 bytes
 src/lib/datasrc/tests/testdata/rwtest.sqlite3      |  Bin 0 -> 13312 bytes
 src/lib/datasrc/tests/testdata/test-root.sqlite3   |  Bin 14336 -> 17408 bytes
 src/lib/datasrc/tests/testdata/test.sqlite3        |  Bin 43008 -> 44032 bytes
 .../{test.sqlite3 => test.sqlite3.nodiffs}         |  Bin 43008 -> 43008 bytes
 src/lib/datasrc/tests/zonetable_unittest.cc        |   36 +-
 src/lib/datasrc/zone.h                             |  547 +++-
 src/lib/datasrc/zonetable.cc                       |   12 +-
 src/lib/datasrc/zonetable.h                        |    6 +-
 src/lib/dhcp/Makefile.am                           |   27 +
 src/lib/dhcp/README                                |   11 +
 src/lib/dhcp/dhcp4.h                               |  191 +
 src/lib/dhcp/dhcp6.h                               |  184 +
 src/lib/dhcp/libdhcp.cc                            |  180 +
 src/lib/dhcp/libdhcp.h                             |  103 +
 src/lib/dhcp/option.cc                             |  306 ++
 src/lib/dhcp/option.h                              |  316 ++
 src/lib/dhcp/option4_addrlst.cc                    |  135 +
 src/lib/dhcp/option4_addrlst.h                     |  167 +
 src/lib/dhcp/option6_addrlst.cc                    |  138 +
 src/lib/dhcp/option6_addrlst.h                     |  126 +
 src/lib/dhcp/option6_ia.cc                         |  136 +
 src/lib/dhcp/option6_ia.h                          |  137 +
 src/lib/dhcp/option6_iaaddr.cc                     |  132 +
 src/lib/dhcp/option6_iaaddr.h                      |  145 +
 src/lib/dhcp/pkt4.cc                               |  257 ++
 src/lib/dhcp/pkt4.h                                |  409 ++
 src/lib/dhcp/pkt6.cc                               |  232 ++
 src/lib/dhcp/pkt6.h                                |  234 ++
 src/lib/dhcp/tests/Makefile.am                     |   42 +
 src/lib/dhcp/tests/libdhcp_unittest.cc             |  234 ++
 src/lib/dhcp/tests/option4_addrlst_unittest.cc     |  273 ++
 src/lib/dhcp/tests/option6_addrlst_unittest.cc     |  232 ++
 src/lib/dhcp/tests/option6_ia_unittest.cc          |  266 ++
 src/lib/dhcp/tests/option6_iaaddr_unittest.cc      |  105 +
 src/lib/dhcp/tests/option_unittest.cc              |  419 ++
 src/lib/dhcp/tests/pkt4_unittest.cc                |  564 +++
 src/lib/dhcp/tests/pkt6_unittest.cc                |  207 +
 src/lib/dhcp/tests/run_unittests.cc                |   27 +
 src/lib/dns/Makefile.am                            |   24 +
 src/lib/dns/benchmarks/Makefile.am                 |    1 +
 src/lib/dns/character_string.cc                    |  140 +
 src/lib/dns/character_string.h                     |   57 +
 src/lib/dns/gen-rdatacode.py.in                    |   17 +-
 src/lib/dns/message.cc                             |   43 +-
 src/lib/dns/message.h                              |   59 +-
 src/lib/dns/messagerenderer.cc                     |    2 -
 src/lib/dns/name.cc                                |    2 +-
 src/lib/dns/python/Makefile.am                     |   54 +-
 src/lib/dns/python/edns_python.cc                  |  262 +-
 src/lib/dns/python/edns_python.h                   |   64 +
 src/lib/dns/python/message_python.cc               |  602 ++--
 src/lib/dns/python/message_python.h                |   40 +
 src/lib/dns/python/message_python_inc.cc           |   41 +
 src/lib/dns/python/messagerenderer_python.cc       |   94 +-
 src/lib/dns/python/messagerenderer_python.h        |   37 +-
 src/lib/dns/python/name_python.cc                  |  160 +-
 src/lib/dns/python/name_python.h                   |   45 +-
 src/lib/dns/python/opcode_python.cc                |  231 +-
 src/lib/dns/python/opcode_python.h                 |   64 +
 src/lib/dns/python/pydnspp.cc                      |  733 ++++-
 src/lib/dns/python/pydnspp_common.cc               |   36 +
 src/lib/dns/python/pydnspp_common.h                |    2 -
 src/lib/dns/python/pydnspp_towire.h                |    4 +-
 src/lib/dns/python/question_python.cc              |  271 +-
 src/lib/dns/python/question_python.h               |   66 +
 src/lib/dns/python/rcode_python.cc                 |  109 +-
 src/lib/dns/python/rcode_python.h                  |   49 +-
 src/lib/dns/python/rdata_python.cc                 |  443 ++-
 src/lib/dns/python/rdata_python.h                  |   68 +
 src/lib/dns/python/rrclass_python.cc               |  303 +-
 src/lib/dns/python/rrclass_python.h                |   68 +
 src/lib/dns/python/rrset_python.cc                 |  554 ++--
 src/lib/dns/python/rrset_python.h                  |   78 +
 src/lib/dns/python/rrttl_python.cc                 |  281 +-
 src/lib/dns/python/rrttl_python.h                  |   67 +
 src/lib/dns/python/rrtype_python.cc                |  348 +-
 src/lib/dns/python/rrtype_python.h                 |   68 +
 src/lib/dns/python/serial_python.cc                |  281 ++
 src/lib/dns/python/serial_python.h                 |   64 +
 src/lib/dns/python/tests/Makefile.am               |    3 +-
 src/lib/dns/python/tests/message_python_test.py    |   69 +-
 src/lib/dns/python/tests/name_python_test.py       |    9 +
 src/lib/dns/python/tests/rdata_python_test.py      |    8 +
 src/lib/dns/python/tests/rrset_python_test.py      |    7 +
 src/lib/dns/python/tests/serial_python_test.py     |  111 +
 src/lib/dns/python/tsig_python.cc                  |  105 +-
 src/lib/dns/python/tsig_python.h                   |   28 +-
 src/lib/dns/python/tsig_rdata_python.cc            |   62 +-
 src/lib/dns/python/tsig_rdata_python.h             |   29 +-
 src/lib/dns/python/tsigerror_python.cc             |  105 +-
 src/lib/dns/python/tsigerror_python.h              |   10 +-
 src/lib/dns/python/tsigkey_python.cc               |  133 +-
 src/lib/dns/python/tsigkey_python.h                |   52 +-
 src/lib/dns/python/tsigrecord_python.cc            |   82 +-
 src/lib/dns/python/tsigrecord_python.h             |   28 +-
 src/lib/dns/rdata/any_255/tsig_250.cc              |  127 +-
 src/lib/dns/rdata/generic/afsdb_18.cc              |  171 +
 src/lib/dns/rdata/generic/afsdb_18.h               |   74 +
 src/lib/dns/rdata/generic/detail/ds_like.h         |  225 +
 src/lib/dns/rdata/generic/detail/txt_like.h        |  224 +
 src/lib/dns/rdata/generic/dlv_32769.cc             |  121 +
 src/lib/dns/rdata/generic/dlv_32769.h              |   77 +
 src/lib/dns/rdata/generic/ds_43.cc                 |  109 +-
 src/lib/dns/rdata/generic/ds_43.h                  |   33 +-
 src/lib/dns/rdata/generic/hinfo_13.cc              |  129 +
 src/lib/dns/rdata/generic/hinfo_13.h               |   77 +
 src/lib/dns/rdata/generic/minfo_14.cc              |  156 +
 src/lib/dns/rdata/generic/minfo_14.h               |   82 +
 src/lib/dns/rdata/generic/naptr_35.cc              |  220 +
 src/lib/dns/rdata/generic/naptr_35.h               |   83 +
 src/lib/dns/rdata/generic/nsec_47.cc               |    5 +
 src/lib/dns/rdata/generic/nsec_47.h                |   10 +
 src/lib/dns/rdata/generic/rp_17.cc                 |    1 +
 src/lib/dns/rdata/generic/rrsig_46.cc              |    5 +
 src/lib/dns/rdata/generic/rrsig_46.h               |    3 +
 src/lib/dns/rdata/generic/soa_6.cc                 |    6 +
 src/lib/dns/rdata/generic/soa_6.h                  |    3 +
 src/lib/dns/rdata/generic/spf_99.cc                |  131 +
 src/lib/dns/rdata/generic/spf_99.h                 |   78 +
 src/lib/dns/rdata/generic/txt_16.cc                |  121 +-
 src/lib/dns/rdata/generic/txt_16.h                 |   11 +-
 src/lib/dns/rdata/in_1/dhcid_49.cc                 |  145 +
 src/lib/dns/rdata/in_1/dhcid_49.h                  |   58 +
 src/lib/dns/rdata/in_1/srv_33.cc                   |  245 ++
 src/lib/dns/rdata/in_1/srv_33.h                    |   93 +
 src/lib/dns/rdata/template.cc                      |    1 +
 src/lib/dns/rdatafields.h                          |    2 +-
 src/lib/dns/rrset.h                                |    2 +-
 src/lib/dns/rrtype-placeholder.h                   |    5 +
 src/lib/dns/serial.cc                              |   76 +
 src/lib/dns/serial.h                               |  155 +
 src/lib/dns/tests/Makefile.am                      |   13 +-
 src/lib/dns/tests/character_string_unittest.cc     |   92 +
 src/lib/dns/tests/message_unittest.cc              |  131 +-
 src/lib/dns/tests/rdata_afsdb_unittest.cc          |  210 +
 src/lib/dns/tests/rdata_dhcid_unittest.cc          |  111 +
 src/lib/dns/tests/rdata_ds_like_unittest.cc        |  171 +
 src/lib/dns/tests/rdata_ds_unittest.cc             |   99 -
 src/lib/dns/tests/rdata_hinfo_unittest.cc          |  115 +
 src/lib/dns/tests/rdata_minfo_unittest.cc          |  184 +
 src/lib/dns/tests/rdata_naptr_unittest.cc          |  178 +
 src/lib/dns/tests/rdata_nsec_unittest.cc           |    6 +
 src/lib/dns/tests/rdata_rrsig_unittest.cc          |    2 +-
 src/lib/dns/tests/rdata_soa_unittest.cc            |    5 +
 src/lib/dns/tests/rdata_srv_unittest.cc            |  173 +
 src/lib/dns/tests/rdata_txt_like_unittest.cc       |  261 ++
 src/lib/dns/tests/rdata_txt_unittest.cc            |  166 -
 src/lib/dns/tests/serial_unittest.cc               |  179 +
 src/lib/dns/tests/testdata/Makefile.am             |   33 +-
 src/lib/dns/tests/testdata/gen-wiredata.py.in      |  610 ---
 src/lib/dns/tests/testdata/message_fromWire19.spec |   20 +
 src/lib/dns/tests/testdata/message_fromWire20.spec |   20 +
 src/lib/dns/tests/testdata/message_fromWire21.spec |   20 +
 src/lib/dns/tests/testdata/message_fromWire22.spec |   14 +
 .../dns/tests/testdata/rdata_afsdb_fromWire1.spec  |    3 +
 .../dns/tests/testdata/rdata_afsdb_fromWire2.spec  |    6 +
 .../dns/tests/testdata/rdata_afsdb_fromWire3.spec  |    4 +
 .../dns/tests/testdata/rdata_afsdb_fromWire4.spec  |    4 +
 .../dns/tests/testdata/rdata_afsdb_fromWire5.spec  |    4 +
 .../dns/tests/testdata/rdata_afsdb_toWire1.spec    |    4 +
 .../dns/tests/testdata/rdata_afsdb_toWire2.spec    |    8 +
 src/lib/dns/tests/testdata/rdata_dhcid_fromWire    |   12 +
 src/lib/dns/tests/testdata/rdata_dhcid_toWire      |    7 +
 .../dns/tests/testdata/rdata_minfo_fromWire1.spec  |    3 +
 .../dns/tests/testdata/rdata_minfo_fromWire2.spec  |    7 +
 .../dns/tests/testdata/rdata_minfo_fromWire3.spec  |    6 +
 .../dns/tests/testdata/rdata_minfo_fromWire4.spec  |    6 +
 .../dns/tests/testdata/rdata_minfo_fromWire5.spec  |    5 +
 .../dns/tests/testdata/rdata_minfo_fromWire6.spec  |    5 +
 .../dns/tests/testdata/rdata_minfo_toWire1.spec    |    5 +
 .../dns/tests/testdata/rdata_minfo_toWire2.spec    |    6 +
 .../testdata/rdata_minfo_toWireUncompressed1.spec  |    7 +
 .../testdata/rdata_minfo_toWireUncompressed2.spec  |    8 +
 src/lib/dns/tests/testdata/rdata_srv_fromWire      |   36 +
 src/lib/dns/tsigkey.h                              |   13 +-
 src/lib/exceptions/exceptions.h                    |   23 +
 src/lib/log/Makefile.am                            |    3 +-
 src/lib/log/README                                 |    5 +
 src/lib/log/log_dbglevels.h                        |   93 +
 src/lib/log/log_formatter.h                        |    2 +-
 src/lib/log/logger_level_impl.h                    |    2 +-
 src/lib/log/logger_manager_impl.h                  |    2 -
 src/lib/log/logger_specification.h                 |    2 +-
 src/lib/log/macros.h                               |    1 +
 src/lib/log/message_dictionary.h                   |    2 +-
 src/lib/log/tests/Makefile.am                      |    6 +-
 src/lib/nsas/nameserver_address_store.h            |    5 +-
 src/lib/nsas/nsas_log.h                            |    6 +-
 src/lib/nsas/zone_entry.h                          |    2 +-
 src/lib/python/Makefile.am                         |    9 +-
 src/lib/python/bind10_config.py.in                 |    4 +
 src/lib/python/isc/Makefile.am                     |    3 +-
 src/lib/python/isc/__init__.py                     |    7 +-
 src/lib/python/isc/acl/Makefile.am                 |   24 +-
 src/lib/python/isc/acl/_dns.py                     |   29 +
 src/lib/python/isc/acl/dns.cc                      |    4 +-
 src/lib/python/isc/acl/dns.py                      |   76 +-
 src/lib/python/isc/acl/dns_requestacl_python.cc    |    4 +-
 src/lib/python/isc/acl/dns_requestcontext_inc.cc   |   19 +-
 .../python/isc/acl/dns_requestcontext_python.cc    |  129 +-
 src/lib/python/isc/acl/dns_requestloader_python.cc |    4 +-
 src/lib/python/isc/acl/tests/Makefile.am           |    4 +-
 src/lib/python/isc/acl/tests/dns_test.py           |   87 +-
 src/lib/python/isc/bind10/Makefile.am              |    4 +
 .../http => lib/python/isc/bind10}/__init__.py     |    0 
 src/lib/python/isc/bind10/component.py             |  647 +++
 src/lib/python/isc/bind10/sockcreator.py           |  239 ++
 src/lib/python/isc/bind10/special_component.py     |  152 +
 src/lib/python/isc/bind10/tests/Makefile.am        |   29 +
 src/lib/python/isc/bind10/tests/component_test.py  | 1032 +++++
 .../python/isc/bind10/tests/sockcreator_test.py    |  327 ++
 src/lib/python/isc/cc/data.py                      |   18 +-
 src/lib/python/isc/cc/tests/Makefile.am            |    4 +-
 src/lib/python/isc/config/Makefile.am              |   32 +-
 src/lib/python/isc/config/ccsession.py             |  172 +-
 src/lib/python/isc/config/cfgmgr.py                |   30 +-
 src/lib/python/isc/config/config_data.py           |  135 +-
 src/lib/python/isc/config/module_spec.py           |  127 +-
 src/lib/python/isc/config/tests/Makefile.am        |    4 +-
 src/lib/python/isc/config/tests/ccsession_test.py  |   87 +-
 src/lib/python/isc/config/tests/cfgmgr_test.py     |   38 +-
 .../python/isc/config/tests/config_data_test.py    |   55 +-
 .../python/isc/config/tests/module_spec_test.py    |  112 +
 src/lib/python/isc/datasrc/Makefile.am             |   32 +-
 src/lib/python/isc/datasrc/__init__.py             |   35 +-
 src/lib/python/isc/datasrc/client_inc.cc           |  249 ++
 src/lib/python/isc/datasrc/client_python.cc        |  347 ++
 src/lib/python/isc/datasrc/client_python.h         |   35 +
 src/lib/python/isc/datasrc/datasrc.cc              |  297 ++
 src/lib/python/isc/datasrc/datasrc.h               |   50 +
 src/lib/python/isc/datasrc/finder_inc.cc           |  134 +
 src/lib/python/isc/datasrc/finder_python.cc        |  289 ++
 src/lib/python/isc/datasrc/finder_python.h         |   44 +
 src/lib/python/isc/datasrc/iterator_inc.cc         |   67 +
 src/lib/python/isc/datasrc/iterator_python.cc      |  242 ++
 src/lib/python/isc/datasrc/iterator_python.h       |   46 +
 src/lib/python/isc/datasrc/journal_reader_inc.cc   |   80 +
 .../python/isc/datasrc/journal_reader_python.cc    |  200 +
 src/lib/python/isc/datasrc/journal_reader_python.h |   47 +
 src/lib/python/isc/datasrc/sqlite3_ds.py           |   92 +-
 src/lib/python/isc/datasrc/tests/Makefile.am       |   19 +-
 src/lib/python/isc/datasrc/tests/datasrc_test.py   |  854 ++++
 .../python/isc/datasrc/tests/sqlite3_ds_test.py    |   50 +-
 .../isc/datasrc/tests/testdata/example.com.sqlite3 |  Bin 43008 -> 44032 bytes
 .../datasrc/tests/testdata/test.sqlite3.nodiffs}   |  Bin 43008 -> 43008 bytes
 src/lib/python/isc/datasrc/updater_inc.cc          |  181 +
 src/lib/python/isc/datasrc/updater_python.cc       |  288 ++
 src/lib/python/isc/datasrc/updater_python.h        |   47 +
 src/lib/python/isc/dns/Makefile.am                 |    8 +
 src/lib/python/isc/log/log.cc                      |  215 +-
 src/lib/python/isc/log/tests/Makefile.am           |   24 +-
 src/lib/python/isc/log/tests/log_test.py           |   41 +
 src/lib/python/isc/log_messages/Makefile.am        |   32 +
 src/lib/python/isc/log_messages/README             |   68 +
 src/lib/python/isc/log_messages/__init__.py        |    3 +
 src/lib/python/isc/log_messages/bind10_messages.py |    1 +
 src/lib/python/isc/log_messages/cfgmgr_messages.py |    1 +
 src/lib/python/isc/log_messages/cmdctl_messages.py |    1 +
 src/lib/python/isc/log_messages/config_messages.py |    1 +
 src/lib/python/isc/log_messages/gen-forwarder.sh   |   14 +
 .../python/isc/log_messages/libxfrin_messages.py   |    1 +
 .../python/isc/log_messages/notify_out_messages.py |    1 +
 .../isc/log_messages/stats_httpd_messages.py       |    1 +
 src/lib/python/isc/log_messages/stats_messages.py  |    1 +
 src/lib/python/isc/log_messages/work/Makefile.am   |   12 +
 .../python/isc/log_messages/work/__init__.py.in    |    3 +
 src/lib/python/isc/log_messages/xfrin_messages.py  |    1 +
 src/lib/python/isc/log_messages/xfrout_messages.py |    1 +
 .../python/isc/log_messages/zonemgr_messages.py    |    1 +
 src/lib/python/isc/net/tests/Makefile.am           |    4 +-
 src/lib/python/isc/notify/Makefile.am              |   17 +-
 src/lib/python/isc/notify/notify_out.py            |  152 +-
 src/lib/python/isc/notify/notify_out_messages.mes  |   23 +-
 src/lib/python/isc/notify/tests/Makefile.am        |   14 +-
 src/lib/python/isc/notify/tests/notify_out_test.py |   73 +-
 .../isc/notify/tests/testdata/brokentest.sqlite3   |  Bin 0 -> 11264 bytes
 .../python/isc/notify/tests/testdata/example.com   |   10 +
 .../python/isc/notify/tests/testdata/example.net   |   14 +
 .../isc/notify/tests/testdata/multisoa.example     |    5 +
 .../python/isc/notify/tests/testdata/nons.example  |    3 +
 .../python/isc/notify/tests/testdata/nosoa.example |    7 +
 .../python/isc/notify/tests/testdata/test.sqlite3  |  Bin 0 -> 13312 bytes
 src/lib/python/isc/testutils/Makefile.am           |    2 +-
 src/lib/python/isc/testutils/rrset_utils.py        |   82 +
 src/lib/python/isc/util/tests/Makefile.am          |    4 +-
 src/lib/python/isc/xfrin/Makefile.am               |   23 +
 .../tests/isc => lib/python/isc/xfrin}/__init__.py |    0 
 src/lib/python/isc/xfrin/diff.py                   |  249 ++
 src/lib/python/isc/xfrin/libxfrin_messages.mes     |   31 +
 src/lib/python/isc/xfrin/tests/Makefile.am         |   24 +
 src/lib/python/isc/xfrin/tests/diff_tests.py       |  466 +++
 src/lib/resolve/recursive_query.cc                 |    1 +
 src/lib/resolve/recursive_query.h                  |   16 +-
 src/lib/resolve/resolve.h                          |    1 -
 src/lib/resolve/resolve_log.h                      |    8 +-
 src/lib/resolve/tests/Makefile.am                  |    1 +
 src/lib/server_common/client.h                     |    2 +-
 src/lib/server_common/logger.h                     |   13 +-
 src/lib/testutils/Makefile.am                      |    2 +-
 src/lib/testutils/dnsmessage_test.h                |   28 +-
 src/lib/testutils/testdata/Makefile.am             |    2 +-
 src/lib/util/Makefile.am                           |    2 +-
 src/lib/util/buffer.h                              |   22 +-
 src/lib/util/filename.h                            |    5 +
 src/lib/util/io_utilities.h                        |   45 +-
 src/lib/util/python/Makefile.am                    |    1 +
 src/lib/util/python/gen_wiredata.py.in             | 1232 ++++++
 src/lib/util/python/pycppwrapper_util.h            |    2 +-
 src/lib/util/python/wrapper_template.cc            |    4 +-
 src/lib/util/python/wrapper_template.h             |    6 +-
 src/lib/util/pyunittests/Makefile.am               |    7 +-
 src/lib/util/strutil.cc                            |   11 +
 src/lib/util/strutil.h                             |   62 +
 src/lib/util/tests/buffer_unittest.cc              |   32 +
 src/lib/util/tests/filename_unittest.cc            |   15 +
 src/lib/util/tests/io_utilities_unittest.cc        |   46 +
 src/lib/util/tests/strutil_unittest.cc             |   80 +-
 src/lib/util/unittests/Makefile.am                 |    2 +-
 tests/lettuce/README                               |  127 +
 tests/lettuce/README.tutorial                      |  157 +
 .../lettuce/configurations/example.org.config.orig |   17 +
 tests/lettuce/configurations/example2.org.config   |   18 +
 tests/lettuce/configurations/no_db_file.config     |   10 +
 .../configurations/xfrin/retransfer_master.conf    |   22 +
 .../configurations/xfrin/retransfer_slave.conf     |   17 +
 tests/lettuce/data/empty_db.sqlite3                |  Bin 0 -> 11264 bytes
 .../lettuce/data}/example.org.sqlite3              |  Bin 14336 -> 14336 bytes
 tests/lettuce/features/example.feature             |  142 +
 tests/lettuce/features/terrain/bind10_control.py   |  144 +
 tests/lettuce/features/terrain/querying.py         |  279 ++
 tests/lettuce/features/terrain/steps.py            |   85 +
 tests/lettuce/features/terrain/terrain.py          |  363 ++
 tests/lettuce/features/xfrin_bind10.feature        |   11 +
 tests/lettuce/setup_intree_bind10.sh.in            |   46 +
 tests/system/README                                |   53 +-
 tests/system/bindctl/tests.sh                      |   21 +-
 tests/system/cleanall.sh                           |    5 +-
 tests/system/common/rndc.conf                      |   25 +
 tests/system/common/rndc.key                       |   22 +
 tests/system/conf.sh.in                            |   45 +-
 tests/system/ixfr/README                           |   86 +
 tests/system/ixfr/b10-config.db.in                 |   23 +
 tests/system/ixfr/clean_ns.sh                      |   28 +
 tests/system/ixfr/common_tests.sh.in               |   78 +
 tests/system/ixfr/db.example.common                | 1556 +++++++
 tests/system/ixfr/db.example.n0.in                 |   29 +
 tests/system/ixfr/db.example.n2.in                 |   28 +
 tests/system/ixfr/db.example.n2.refresh.in         |   28 +
 tests/system/ixfr/db.example.n4.in                 |   31 +
 tests/system/ixfr/db.example.n6.in                 |   29 +
 tests/system/ixfr/in-1/clean.sh                    |    1 +
 tests/system/ixfr/in-1/ns1/README                  |    3 +
 tests/system/ixfr/in-1/nsx2/README                 |    3 +
 tests/system/ixfr/in-1/setup.sh.in                 |   30 +
 tests/system/ixfr/in-1/tests.sh                    |   37 +
 tests/system/ixfr/in-2/clean.sh                    |    1 +
 tests/system/ixfr/in-2/ns1/README                  |    3 +
 tests/system/ixfr/in-2/nsx2/README                 |    3 +
 tests/system/ixfr/in-2/setup.sh.in                 |   29 +
 tests/system/ixfr/in-2/tests.sh                    |   81 +
 tests/system/ixfr/in-3/clean.sh                    |    1 +
 tests/system/ixfr/in-3/ns1/README                  |    3 +
 tests/system/ixfr/in-3/nsx2/README                 |    3 +
 tests/system/ixfr/in-3/setup.sh.in                 |   29 +
 tests/system/ixfr/in-3/tests.sh                    |   66 +
 tests/system/ixfr/in-4/clean.sh                    |    1 +
 tests/system/ixfr/in-4/ns1/README                  |    3 +
 tests/system/ixfr/in-4/nsx2/README                 |    3 +
 tests/system/ixfr/in-4/setup.sh.in                 |   30 +
 tests/system/ixfr/in-4/tests.sh                    |   53 +
 tests/system/ixfr/ixfr_init.sh.in                  |  330 ++
 tests/system/ixfr/named_noixfr.conf                |   42 +
 tests/system/ixfr/named_nonotify.conf              |   40 +
 tests/system/ixfr/named_notify.conf                |   41 +
 tests/system/run.sh                                |  125 -
 tests/system/run.sh.in                             |  125 +
 tests/system/start.pl                              |    4 +-
 tests/tools/badpacket/badpacket.cc                 |    2 +
 630 files changed, 65961 insertions(+), 11809 deletions(-)
 create mode 100644 compatcheck/Makefile.am
 create mode 100644 compatcheck/README
 create mode 100755 compatcheck/sqlite3-difftbl-check.py.in
 create mode 100644 doc/guide/bind10-guide.txt
 delete mode 100755 src/bin/bind10/bind10.py.in
 create mode 100755 src/bin/bind10/bind10_src.py.in
 create mode 100644 src/bin/bind10/creatorapi.txt
 create mode 100644 src/bin/dhcp6/b10-dhcp6.xml
 delete mode 100644 src/bin/dhcp6/dhcp6.h
 create mode 100644 src/bin/dhcp6/dhcp6_srv.cc
 create mode 100644 src/bin/dhcp6/dhcp6_srv.h
 create mode 100644 src/bin/dhcp6/iface_mgr.cc
 create mode 100644 src/bin/dhcp6/iface_mgr.h
 create mode 100644 src/bin/dhcp6/interfaces.txt
 create mode 100644 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
 create mode 100644 src/bin/dhcp6/tests/dhcp6_unittests.cc
 create mode 100644 src/bin/dhcp6/tests/iface_mgr_unittest.cc
 mode change 100644 => 100755 src/bin/loadzone/tests/correct/correct_test.sh.in
 mode change 100644 => 100755 src/bin/loadzone/tests/error/error_test.sh.in
 delete mode 100644 src/bin/stats/stats-schema.spec
 mode change 100644 => 100755 src/bin/stats/stats.py.in
 mode change 100755 => 100644 src/bin/stats/stats_httpd.py.in
 delete mode 100644 src/bin/stats/tests/fake_select.py
 delete mode 100644 src/bin/stats/tests/fake_socket.py
 delete mode 100644 src/bin/stats/tests/fake_time.py
 delete mode 100644 src/bin/stats/tests/http/Makefile.am
 delete mode 100644 src/bin/stats/tests/http/server.py
 delete mode 100644 src/bin/stats/tests/isc/Makefile.am
 delete mode 100644 src/bin/stats/tests/isc/cc/Makefile.am
 delete mode 100644 src/bin/stats/tests/isc/cc/__init__.py
 delete mode 100644 src/bin/stats/tests/isc/cc/session.py
 delete mode 100644 src/bin/stats/tests/isc/config/Makefile.am
 delete mode 100644 src/bin/stats/tests/isc/config/__init__.py
 delete mode 100644 src/bin/stats/tests/isc/config/ccsession.py
 delete mode 100644 src/bin/stats/tests/isc/log/Makefile.am
 delete mode 100644 src/bin/stats/tests/isc/log/__init__.py
 delete mode 100644 src/bin/stats/tests/isc/util/Makefile.am
 delete mode 100644 src/bin/stats/tests/isc/util/__init__.py
 delete mode 100644 src/bin/stats/tests/isc/util/process.py
 create mode 100644 src/bin/stats/tests/test_utils.py
 delete mode 100644 src/bin/stats/tests/testdata/Makefile.am
 delete mode 100644 src/bin/stats/tests/testdata/stats_test.spec
 create mode 100644 src/bin/xfrin/tests/testdata/Makefile.am
 create mode 100644 src/bin/xfrin/tests/testdata/example.com
 create mode 100644 src/bin/xfrin/tests/testdata/example.com.sqlite3
 create mode 100755 src/bin/xfrout/tests/testdata/creatediff.py
 create mode 100644 src/bin/xfrout/tests/testdata/example.com
 create mode 100644 src/bin/xfrout/tests/testdata/test.sqlite3
 create mode 100644 src/bin/zonemgr/zonemgr_messages.mes
 create mode 100644 src/lib/acl/dnsname_check.h
 create mode 100644 src/lib/acl/tests/dnsname_check_unittest.cc
 create mode 100644 src/lib/config/tests/testdata/data32_1.data
 create mode 100644 src/lib/config/tests/testdata/data32_2.data
 create mode 100644 src/lib/config/tests/testdata/data32_3.data
 create mode 100644 src/lib/config/tests/testdata/data33_1.data
 create mode 100644 src/lib/config/tests/testdata/data33_2.data
 create mode 100644 src/lib/config/tests/testdata/spec32.spec
 create mode 100644 src/lib/config/tests/testdata/spec33.spec
 create mode 100644 src/lib/config/tests/testdata/spec34.spec
 create mode 100644 src/lib/config/tests/testdata/spec35.spec
 create mode 100644 src/lib/config/tests/testdata/spec36.spec
 create mode 100644 src/lib/config/tests/testdata/spec37.spec
 create mode 100644 src/lib/config/tests/testdata/spec38.spec
 create mode 100644 src/lib/datasrc/client.h
 create mode 100644 src/lib/datasrc/database.cc
 create mode 100644 src/lib/datasrc/database.h
 create mode 100644 src/lib/datasrc/datasrc_config.h.pre.in
 create mode 100644 src/lib/datasrc/factory.cc
 create mode 100644 src/lib/datasrc/factory.h
 create mode 100644 src/lib/datasrc/iterator.h
 create mode 100644 src/lib/datasrc/sqlite3_accessor.cc
 create mode 100644 src/lib/datasrc/sqlite3_accessor.h
 create mode 100644 src/lib/datasrc/tests/client_unittest.cc
 create mode 100644 src/lib/datasrc/tests/database_unittest.cc
 create mode 100644 src/lib/datasrc/tests/factory_unittest.cc
 create mode 100644 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
 create mode 100644 src/lib/datasrc/tests/testdata/Makefile.am
 create mode 100644 src/lib/datasrc/tests/testdata/diffs.sqlite3
 create mode 100644 src/lib/datasrc/tests/testdata/diffs_table.sql
 create mode 100644 src/lib/datasrc/tests/testdata/rwtest.sqlite3
 copy src/lib/datasrc/tests/testdata/{test.sqlite3 => test.sqlite3.nodiffs} (100%)
 create mode 100644 src/lib/dhcp/Makefile.am
 create mode 100644 src/lib/dhcp/README
 create mode 100644 src/lib/dhcp/dhcp4.h
 create mode 100644 src/lib/dhcp/dhcp6.h
 create mode 100644 src/lib/dhcp/libdhcp.cc
 create mode 100644 src/lib/dhcp/libdhcp.h
 create mode 100644 src/lib/dhcp/option.cc
 create mode 100644 src/lib/dhcp/option.h
 create mode 100644 src/lib/dhcp/option4_addrlst.cc
 create mode 100644 src/lib/dhcp/option4_addrlst.h
 create mode 100644 src/lib/dhcp/option6_addrlst.cc
 create mode 100644 src/lib/dhcp/option6_addrlst.h
 create mode 100644 src/lib/dhcp/option6_ia.cc
 create mode 100644 src/lib/dhcp/option6_ia.h
 create mode 100644 src/lib/dhcp/option6_iaaddr.cc
 create mode 100644 src/lib/dhcp/option6_iaaddr.h
 create mode 100644 src/lib/dhcp/pkt4.cc
 create mode 100644 src/lib/dhcp/pkt4.h
 create mode 100644 src/lib/dhcp/pkt6.cc
 create mode 100644 src/lib/dhcp/pkt6.h
 create mode 100644 src/lib/dhcp/tests/Makefile.am
 create mode 100644 src/lib/dhcp/tests/libdhcp_unittest.cc
 create mode 100644 src/lib/dhcp/tests/option4_addrlst_unittest.cc
 create mode 100644 src/lib/dhcp/tests/option6_addrlst_unittest.cc
 create mode 100644 src/lib/dhcp/tests/option6_ia_unittest.cc
 create mode 100644 src/lib/dhcp/tests/option6_iaaddr_unittest.cc
 create mode 100644 src/lib/dhcp/tests/option_unittest.cc
 create mode 100644 src/lib/dhcp/tests/pkt4_unittest.cc
 create mode 100644 src/lib/dhcp/tests/pkt6_unittest.cc
 create mode 100644 src/lib/dhcp/tests/run_unittests.cc
 create mode 100644 src/lib/dns/character_string.cc
 create mode 100644 src/lib/dns/character_string.h
 create mode 100644 src/lib/dns/python/edns_python.h
 create mode 100644 src/lib/dns/python/message_python.h
 create mode 100644 src/lib/dns/python/message_python_inc.cc
 create mode 100644 src/lib/dns/python/opcode_python.h
 create mode 100644 src/lib/dns/python/question_python.h
 create mode 100644 src/lib/dns/python/rdata_python.h
 create mode 100644 src/lib/dns/python/rrclass_python.h
 create mode 100644 src/lib/dns/python/rrset_python.h
 create mode 100644 src/lib/dns/python/rrttl_python.h
 create mode 100644 src/lib/dns/python/rrtype_python.h
 create mode 100644 src/lib/dns/python/serial_python.cc
 create mode 100644 src/lib/dns/python/serial_python.h
 create mode 100644 src/lib/dns/python/tests/serial_python_test.py
 create mode 100644 src/lib/dns/rdata/generic/afsdb_18.cc
 create mode 100644 src/lib/dns/rdata/generic/afsdb_18.h
 create mode 100644 src/lib/dns/rdata/generic/detail/ds_like.h
 create mode 100644 src/lib/dns/rdata/generic/detail/txt_like.h
 create mode 100644 src/lib/dns/rdata/generic/dlv_32769.cc
 create mode 100644 src/lib/dns/rdata/generic/dlv_32769.h
 create mode 100644 src/lib/dns/rdata/generic/hinfo_13.cc
 create mode 100644 src/lib/dns/rdata/generic/hinfo_13.h
 create mode 100644 src/lib/dns/rdata/generic/minfo_14.cc
 create mode 100644 src/lib/dns/rdata/generic/minfo_14.h
 create mode 100644 src/lib/dns/rdata/generic/naptr_35.cc
 create mode 100644 src/lib/dns/rdata/generic/naptr_35.h
 create mode 100644 src/lib/dns/rdata/generic/spf_99.cc
 create mode 100644 src/lib/dns/rdata/generic/spf_99.h
 create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.cc
 create mode 100644 src/lib/dns/rdata/in_1/dhcid_49.h
 create mode 100644 src/lib/dns/rdata/in_1/srv_33.cc
 create mode 100644 src/lib/dns/rdata/in_1/srv_33.h
 create mode 100644 src/lib/dns/serial.cc
 create mode 100644 src/lib/dns/serial.h
 create mode 100644 src/lib/dns/tests/character_string_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_afsdb_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_dhcid_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_ds_like_unittest.cc
 delete mode 100644 src/lib/dns/tests/rdata_ds_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_hinfo_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_minfo_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_naptr_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_srv_unittest.cc
 create mode 100644 src/lib/dns/tests/rdata_txt_like_unittest.cc
 delete mode 100644 src/lib/dns/tests/rdata_txt_unittest.cc
 create mode 100644 src/lib/dns/tests/serial_unittest.cc
 delete mode 100755 src/lib/dns/tests/testdata/gen-wiredata.py.in
 create mode 100644 src/lib/dns/tests/testdata/message_fromWire19.spec
 create mode 100644 src/lib/dns/tests/testdata/message_fromWire20.spec
 create mode 100644 src/lib/dns/tests/testdata/message_fromWire21.spec
 create mode 100644 src/lib/dns/tests/testdata/message_fromWire22.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_fromWire
 create mode 100644 src/lib/dns/tests/testdata/rdata_dhcid_toWire
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
 create mode 100644 src/lib/dns/tests/testdata/rdata_srv_fromWire
 create mode 100644 src/lib/log/log_dbglevels.h
 create mode 100644 src/lib/python/isc/acl/_dns.py
 create mode 100644 src/lib/python/isc/bind10/Makefile.am
 rename src/{bin/stats/tests/http => lib/python/isc/bind10}/__init__.py (100%)
 create mode 100644 src/lib/python/isc/bind10/component.py
 create mode 100644 src/lib/python/isc/bind10/sockcreator.py
 create mode 100644 src/lib/python/isc/bind10/special_component.py
 create mode 100644 src/lib/python/isc/bind10/tests/Makefile.am
 create mode 100644 src/lib/python/isc/bind10/tests/component_test.py
 create mode 100644 src/lib/python/isc/bind10/tests/sockcreator_test.py
 create mode 100644 src/lib/python/isc/datasrc/client_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/client_python.cc
 create mode 100644 src/lib/python/isc/datasrc/client_python.h
 create mode 100644 src/lib/python/isc/datasrc/datasrc.cc
 create mode 100644 src/lib/python/isc/datasrc/datasrc.h
 create mode 100644 src/lib/python/isc/datasrc/finder_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/finder_python.cc
 create mode 100644 src/lib/python/isc/datasrc/finder_python.h
 create mode 100644 src/lib/python/isc/datasrc/iterator_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/iterator_python.cc
 create mode 100644 src/lib/python/isc/datasrc/iterator_python.h
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.cc
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.h
 create mode 100644 src/lib/python/isc/datasrc/tests/datasrc_test.py
 copy src/lib/{datasrc/tests/testdata/test.sqlite3 => python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs} (100%)
 create mode 100644 src/lib/python/isc/datasrc/updater_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/updater_python.cc
 create mode 100644 src/lib/python/isc/datasrc/updater_python.h
 create mode 100644 src/lib/python/isc/dns/Makefile.am
 create mode 100644 src/lib/python/isc/log_messages/Makefile.am
 create mode 100644 src/lib/python/isc/log_messages/README
 create mode 100644 src/lib/python/isc/log_messages/__init__.py
 create mode 100644 src/lib/python/isc/log_messages/bind10_messages.py
 create mode 100644 src/lib/python/isc/log_messages/cfgmgr_messages.py
 create mode 100644 src/lib/python/isc/log_messages/cmdctl_messages.py
 create mode 100644 src/lib/python/isc/log_messages/config_messages.py
 create mode 100755 src/lib/python/isc/log_messages/gen-forwarder.sh
 create mode 100644 src/lib/python/isc/log_messages/libxfrin_messages.py
 create mode 100644 src/lib/python/isc/log_messages/notify_out_messages.py
 create mode 100644 src/lib/python/isc/log_messages/stats_httpd_messages.py
 create mode 100644 src/lib/python/isc/log_messages/stats_messages.py
 create mode 100644 src/lib/python/isc/log_messages/work/Makefile.am
 create mode 100644 src/lib/python/isc/log_messages/work/__init__.py.in
 create mode 100644 src/lib/python/isc/log_messages/xfrin_messages.py
 create mode 100644 src/lib/python/isc/log_messages/xfrout_messages.py
 create mode 100644 src/lib/python/isc/log_messages/zonemgr_messages.py
 create mode 100644 src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
 create mode 100644 src/lib/python/isc/notify/tests/testdata/example.com
 create mode 100644 src/lib/python/isc/notify/tests/testdata/example.net
 create mode 100644 src/lib/python/isc/notify/tests/testdata/multisoa.example
 create mode 100644 src/lib/python/isc/notify/tests/testdata/nons.example
 create mode 100644 src/lib/python/isc/notify/tests/testdata/nosoa.example
 create mode 100644 src/lib/python/isc/notify/tests/testdata/test.sqlite3
 create mode 100644 src/lib/python/isc/testutils/rrset_utils.py
 create mode 100644 src/lib/python/isc/xfrin/Makefile.am
 rename src/{bin/stats/tests/isc => lib/python/isc/xfrin}/__init__.py (100%)
 create mode 100644 src/lib/python/isc/xfrin/diff.py
 create mode 100644 src/lib/python/isc/xfrin/libxfrin_messages.mes
 create mode 100644 src/lib/python/isc/xfrin/tests/Makefile.am
 create mode 100644 src/lib/python/isc/xfrin/tests/diff_tests.py
 create mode 100644 src/lib/util/python/Makefile.am
 create mode 100755 src/lib/util/python/gen_wiredata.py.in
 create mode 100644 tests/lettuce/README
 create mode 100644 tests/lettuce/README.tutorial
 create mode 100644 tests/lettuce/configurations/example.org.config.orig
 create mode 100644 tests/lettuce/configurations/example2.org.config
 create mode 100644 tests/lettuce/configurations/no_db_file.config
 create mode 100644 tests/lettuce/configurations/xfrin/retransfer_master.conf
 create mode 100644 tests/lettuce/configurations/xfrin/retransfer_slave.conf
 create mode 100644 tests/lettuce/data/empty_db.sqlite3
 copy {src/lib/datasrc/tests/testdata => tests/lettuce/data}/example.org.sqlite3 (100%)
 create mode 100644 tests/lettuce/features/example.feature
 create mode 100644 tests/lettuce/features/terrain/bind10_control.py
 create mode 100644 tests/lettuce/features/terrain/querying.py
 create mode 100644 tests/lettuce/features/terrain/steps.py
 create mode 100644 tests/lettuce/features/terrain/terrain.py
 create mode 100644 tests/lettuce/features/xfrin_bind10.feature
 create mode 100755 tests/lettuce/setup_intree_bind10.sh.in
 create mode 100644 tests/system/common/rndc.conf
 create mode 100644 tests/system/common/rndc.key
 create mode 100644 tests/system/ixfr/README
 create mode 100644 tests/system/ixfr/b10-config.db.in
 create mode 100644 tests/system/ixfr/clean_ns.sh
 create mode 100644 tests/system/ixfr/common_tests.sh.in
 create mode 100644 tests/system/ixfr/db.example.common
 create mode 100644 tests/system/ixfr/db.example.n0.in
 create mode 100644 tests/system/ixfr/db.example.n2.in
 create mode 100644 tests/system/ixfr/db.example.n2.refresh.in
 create mode 100644 tests/system/ixfr/db.example.n4.in
 create mode 100644 tests/system/ixfr/db.example.n6.in
 create mode 120000 tests/system/ixfr/in-1/clean.sh
 create mode 100644 tests/system/ixfr/in-1/ns1/README
 create mode 100644 tests/system/ixfr/in-1/nsx2/README
 create mode 100644 tests/system/ixfr/in-1/setup.sh.in
 create mode 100644 tests/system/ixfr/in-1/tests.sh
 create mode 120000 tests/system/ixfr/in-2/clean.sh
 create mode 100644 tests/system/ixfr/in-2/ns1/README
 create mode 100644 tests/system/ixfr/in-2/nsx2/README
 create mode 100644 tests/system/ixfr/in-2/setup.sh.in
 create mode 100644 tests/system/ixfr/in-2/tests.sh
 create mode 120000 tests/system/ixfr/in-3/clean.sh
 create mode 100644 tests/system/ixfr/in-3/ns1/README
 create mode 100644 tests/system/ixfr/in-3/nsx2/README
 create mode 100644 tests/system/ixfr/in-3/setup.sh.in
 create mode 100644 tests/system/ixfr/in-3/tests.sh
 create mode 120000 tests/system/ixfr/in-4/clean.sh
 create mode 100644 tests/system/ixfr/in-4/ns1/README
 create mode 100644 tests/system/ixfr/in-4/nsx2/README
 create mode 100644 tests/system/ixfr/in-4/setup.sh.in
 create mode 100644 tests/system/ixfr/in-4/tests.sh
 create mode 100644 tests/system/ixfr/ixfr_init.sh.in
 create mode 100644 tests/system/ixfr/named_noixfr.conf
 create mode 100644 tests/system/ixfr/named_nonotify.conf
 create mode 100644 tests/system/ixfr/named_notify.conf
 delete mode 100755 tests/system/run.sh
 create mode 100755 tests/system/run.sh.in

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 9bb8fed..434bd61 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,9 +1,474 @@
-TBD.	[func]		y-aharen
+339.	[func]		y-aharen
 	src/lib/statistics: Added statistics counter library for entire server
 	items and per zone items. Also, modified b10-auth to use it. It is
 	also intended to use in the other modules such as b10-resolver.
 	(Trac #510, git TBD)
 
+338.	[bug]		jinmei
+	b10-xfrin didn't check SOA serials of SOA and IXFR responses,
+	which resulted in unnecessary transfer or unexpected IXFR
+	timeouts (these issues were not overlooked but deferred to be
+	fixed until #1278 was completed).  Validation on responses to SOA
+	queries was tightened, too.
+	(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
+
+337.	[func]		tomek
+	libdhcp++: Support for DHCPv4 option that can store a single
+	address or a list of IPv4 addresses added. Support for END option
+	added.
+	(Trac #1350, git cc20ff993da1ddb1c6e8a98370438b45a2be9e0a)
+
+336.	[func]		jelte
+	libdns++ (and its python wrapper) now includes a class Serial, for 
+	SOA SERIAL comparison and addition. Operations on instances of this 
+	class follow the specification from RFC 1982. 
+	Rdata::SOA::getSerial() now returns values of this type (and not 
+	uint32_t).
+	(Trac #1278, git 2ae72d76c74f61a67590722c73ebbf631388acbd)
+
+335.	[bug]*		jelte
+	The DataSourceClientContainer class that dynamically loads 
+	datasource backend libraries no longer provides just a .so file name 
+	to its call to dlopen(), but passes it an absolute path. This means 
+	that it is no longer a system implementation detail that depends on 
+	[DY]LD_LIBRARY_PATH which file is chosen, should there be multiple 
+	options (for instance, when test-running a new build while a 
+	different version is installed).
+	These loadable libraries are also no longer installed in the default 
+	library path, but in a subdirectory of the libexec directory of the 
+	target ($prefix/libexec/[version]/backends).
+	This also removes the need to handle b10-xfrin and b10-xfrout as 
+	'special' hardcoded components, and they are now started as regular 
+	components as dictated by the configuration of the boss process.
+	(Trac #1292, git 83ce13c2d85068a1bec015361e4ef8c35590a5d0)
+
+334.	[bug]		jinmei
+	b10-xfrout could potentially create an overflow response message
+	(exceeding the 64KB max) or could create unnecessarily small
+	messages.  The former was actually unlikely to happen due to the
+	effect of name compression, and the latter was marginal and at least
+	shouldn't cause an interoperability problem, but these were still
+	potential problems and were fixed.
+	(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
+
+333.    [bug]		dvv
+	Solaris needs "-z now" to force non-lazy binding and prevent g++ static
+	initialization code from deadlocking.
+	(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
+
+332.    [bug]		vorner
+	C++ exceptions in the isc.dns.Rdata wrapper are now converted
+	to python ones instead of just aborting the interpreter.
+	(Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
+
+bind10-devel-20111128 released on November 28, 2011
+
+331.	[bug]		shane
+	Fixed a bug in data source library where a zone with more labels
+	than an out-of-bailiwick name server would cause an exception to
+	be raised.
+	(Trac #1430, git 81f62344db074bc5eea3aaf3682122fdec6451ad)
+
+330.	[bug]		jelte
+	Fixed a bug in b10-auth where it would sometimes fail because it
+	tried to check for queued msgq messages before the session was
+	fully running.
+	(git c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca)
+
+329.	[doc]		vorner, jreed
+	Document the bind10 run control configuration in guide and
+	manual page.
+	(Trac #1341, git c1171699a2b501321ab54207ad26e5da2b092d63)
+
+328.	[func]		jelte
+	b10-auth now passes IXFR requests on to b10-xfrout, and no longer
+	responds to them with NOTIMPL.
+	(Trac #1390, git ab3f90da16d31fc6833d869686e07729d9b8c135)
+
+327.	[func]		jinmei
+	b10-xfrout now supports IXFR.  (Right now there is no user
+	configurable parameter about this feature; b10-xfrout will
+	always respond to IXFR requests according to RFC1995).
+	(Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
+
+326.	[build]*		jinmei
+	Added a check script for the SQLite3 schema version.  It will be
+	run at the beginning of 'make install', and if it detects an old
+	version of schema, installation will stop.  You'll then need to
+	upgrade the database file by following the error message.
+	(Trac #1404, git a435f3ac50667bcb76dca44b7b5d152f45432b57)
+
+325.	[func]		jinmei
+	Python isc.datasrc: added interfaces for difference management:
+	DataSourceClient.get_updater() now has the 'journaling' parameter
+	to enable storing diffs to the data source, and a new class
+	ZoneJournalReader was introduced to retrieve them, which can be
+	created by the new DataSourceClient.get_journal_reader() method.
+	(Trac #1333, git 3e19362bc1ba7dc67a87768e2b172c48b32417f5,
+	git 39def1d39c9543fc485eceaa5d390062edb97676)
+
+324.	[bug]		jinmei
+	Fixed reference leak in the isc.log Python module.  Most of all
+	BIND 10 Python programs had memory leak (even though the pace of
+	leak may be slow) due to this bug.
+	(Trac #1359, git 164d651a0e4c1059c71f56b52ea87ac72b7f6c77)
+
+323.	[bug]		jinmei
+	b10-xfrout incorrectly skipped adding TSIG RRs to some
+	intermediate responses (when TSIG is to be used for the
+	responses).  While RFC2845 optionally allows to skip intermediate
+	TSIGs (as long as the digest for the skipped part was included
+	in a later TSIG), the underlying TSIG API doesn't support this
+	mode of signing.
+	(Trac #1370, git 76fb414ea5257b639ba58ee336fae9a68998b30d)
+
+322.	[func]		jinmei
+	datasrc: Added C++ API for retrieving difference of two versions
+	of a zone.  A new ZoneJournalReader class was introduced for this
+	purpose, and a corresponding factory method was added to
+	DataSourceClient.
+	(Trac #1332, git c1138d13b2692fa3a4f2ae1454052c866d24e654)
+
+321.	[func]*		jinmei
+	b10-xfrin now installs IXFR differences into the underlying data
+	source (if it supports journaling) so that the stored differences
+	can be used for subsequent IXFR-out transactions.
+	Note: this is a backward incompatibility change for older sqlite3
+	database files.  They need to be upgraded to have a "diffs" table.
+	(Trac #1376, git 1219d81b49e51adece77dc57b5902fa1c6be1407)
+
+320.	[func]*		vorner
+	The --brittle switch was removed from the bind10 executable.
+	It didn't work after change #316 (Trac #213) and the same
+	effect can be accomplished by declaring all components as core.
+	(Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
+
+319.	[func]		naokikambe
+	b10-stats-httpd was updated. In addition of the access to all
+	statistics items of all modules, the specified item or the items
+	of the specified module name can be accessed.  For example, the
+	URI requested by using the feature is showed as
+	"/bind10/statistics/xml/Auth" or
+	"/bind10/statistics/xml/Auth/queries.tcp". The list of all possible
+	module names and all possible item names can be showed in the
+	root document, whose URI is "/bind10/statistics/xml".  This change
+	is not only for the XML documents but also is for the XSD and
+	XSL documents.
+	(Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
+
+318.	[func]		stephen
+	Add C++ API for accessing zone difference information in
+	database-based data sources.
+	(Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
+
+317.	[func]		vorner
+	datasrc: the getUpdater method of DataSourceClient supports an
+	optional 'journaling' parameter to indicate the generated updater
+	to store diffs.  The database based derived class implements this
+	extension.
+	(Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
+
+316.	[func]*		vorner
+	The configuration of what parts of the system run is more
+	flexible now.  Everything that should run must have an
+	entry in Boss/components.
+	(Trac #213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
+
+315.	[func]		tomek
+	libdhcp: Support for DHCPv4 packet manipulation is now implemented.
+	All fixed fields are now supported. Generic support for DHCPv4
+	options is available (both parsing and assembly). There is no code
+	that uses this new functionality yet, so it is not usable directly
+	at this time. This code will be used by upcoming b10-dhcp4 daemon.
+	(Trac #1228, git 31d5a4f66b18cca838ca1182b9f13034066427a7)
+
+314.	[bug]		jelte
+	b10-xfrin would previously initiate incoming transfers upon 
+	receiving NOTIFY messages from any address (if the zone was 
+	known to b10-xfrin, and using the configured address). It now 
+	only starts a transfer if the source address from the NOTIFY 
+	packet matches the configured master address and port. This was 
+	really already fixed in release bind10-devel-20111014, but there 
+	were some deferred cleanups to add.
+	(Trac #1298, git 1177bfe30e17a76bea6b6447e14ae9be9e1ca8c2)
+
+313.	[func]		jinmei
+	datasrc: Added C++ API for adding zone differences to database
+	based data sources.  It's intended to be used for the support for
+	IXFR-in and dynamic update (so they can subsequently be retrieved
+	for IXFR-out).  The addRecordDiff method of the DatabaseAccessor
+	defines the interface, and a concrete implementation for SQLite3
+	was provided.
+	(Trac #1329, git 1aa233fab1d74dc776899df61181806679d14013)
+
+312.	[func]		jelte
+	Added an initial framework for doing system tests using the 
+	cucumber-based BDD tool Lettuce. A number of general steps are
+	included,  for instance running bind10 with specific
+	configurations, sending queries, and inspecting query answers. A
+	few very basic tests are included as well.
+	(Trac #1290, git 6b75c128bcdcefd85c18ccb6def59e9acedd4437)
+
+311.	[bug]		jelte
+	Fixed a bug in bindctl where tab-completion for names that
+	contain a hyphen resulted in unexpected behaviour, such as
+	appending the already-typed part again.
+	(Trac #1345, git f80ab7879cc29f875c40dde6b44e3796ac98d6da)
+
+310.	[bug]		jelte
+	Fixed a bug where bindctl could not set a value that is optional
+	and has no default, resulting in the error that the setting
+	itself was unknown. bindctl now correctly sees the setting and
+	is able to set it.
+	(Trac #1344, git 0e776c32330aee466073771600390ce74b959b38)
+
+309.	[bug]		jelte
+	Fixed a bug in bindctl where the removal of elements from a set
+	with default values was not stored, unless the set had been
+	modified in another way already.
+	(Trac #1343, git 25c802dd1c30580b94345e83eeb6a168ab329a33)
+
+308.	[build]		jelte
+	The configure script will now use pkg-config for finding
+	information about the Botan library. If pkg-config is unavailable,
+	or unaware of Botan, it will fall back to botan-config. It will
+	also use botan-config when a specific botan library directory is
+	given using the '--with-botan=' flag
+	(Trac #1194, git dc491833cf75ac1481ba1475795b0f266545013d)
+
+307.	[func]		vorner
+	When zone transfer in fails with IXFR, it is retried with AXFR
+	automatically.
+	(Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
+
+306.	[bug]		stephen
+	Boss process now waits for the configuration manager to initialize
+	itself before continuing with startup.  This fixes a race condition
+	whereby the Boss could start the configuration manager and then
+	immediately start components that depended on that component being
+	fully initialized.
+	(Trac #1271, git 607cbae949553adac7e2a684fa25bda804658f61)
+
+305.	[bug]		jinmei
+	Python isc.dns, isc.datasrc, xfrin, xfrout: fixed reference leak
+	in Message.get_question(), Message.get_section(),
+	RRset.get_rdata(), and DataSourceClient.get_updater().
+	The leak caused severe memory leak in b10-xfrin, and (although no
+	one reported it) should have caused less visible leak in
+	b10-xfrout.  b10-xfrin had its own leak, which was also fixed.
+	(Trac #1028, git a72886e643864bb6f86ab47b115a55e0c7f7fcad)
+
+304.	[bug]		jelte
+	The run_bind10.sh test script now no longer runs processes from
+	an installed version of BIND 10, but will correctly use the
+	build tree paths.
+	(Trac #1246, git 1d43b46ab58077daaaf5cae3c6aa3e0eb76eb5d8)
+
+303.	[bug]		jinmei
+	Changed the installation path for the UNIX domain file used
+	for the communication between b10-auth and b10-xfrout to a
+	"@PACKAGE@" subdirectory (e.g. from /usr/local/var to
+	/usr/local/var/bind10-devel).  This should be transparent change
+	because this file is automatically created and cleaned up, but
+	if the old file somehow remains, it can now be safely removed.
+	(Trac #869, git 96e22f4284307b1d5f15e03837559711bb4f580c)
+
+302.	[bug]		jelte
+	msgq no longer crashes if the remote end is closed while msgq
+	tries to send data. It will now simply drop the message and close
+	the connection itself.
+	(Trac #1180, git 6e68b97b050e40e073f736d84b62b3e193dd870a)
+
+301.	[func]		stephen
+	Add system test for IXFR over TCP.
+	(Trac #1213, git 68ee3818bcbecebf3e6789e81ea79d551a4ff3e8)
+
+300.	[func]*		tomek
+	libdhcp: DHCP packet library was implemented. Currently it handles
+	packet reception, option parsing, option generation and output
+	packet building. Generic and specialized classes for several
+	DHCPv6 options (IA_NA, IAADDR, address-list) are available. A
+	simple code was added that leverages libdhcp. It is a skeleton
+	DHCPv6 server. It receives incoming SOLICIT and REQUEST messages
+	and responds with proper ADVERTISE and REPLY. Note that since
+	LeaseManager is not implemented, server assigns the same
+	hardcoded lease for every client. This change removes existing
+	DHCPv6 echo server as it was only a proof of concept code.
+	(Trac #1186, git 67ea6de047d4dbd63c25fe7f03f5d5cc2452ad7d)
+
+299.	[build]		jreed
+	Do not install the libfake_session, libtestutils, or libbench
+	libraries. They are used by tests within the source tree.
+	Convert all test-related makefiles to build test code at
+	regular make time to better work with test-driven development.
+	This reverts some of #1091. (The tests are run using "make
+	check".)
+	(Trac #1286, git cee641fd3d12341d6bfce5a6fbd913e3aebc1e8e)
+
+bind10-devel-20111014 released on October 14, 2011
+
+298.	[doc]		jreed
+	Shorten README. Include plain text format of the Guide.
+	(git d1897d3, git 337198f)
+
+297.	[func]		dvv
+	Implement the SPF rrtype according to RFC4408.
+	(Trac #1140, git 146934075349f94ee27f23bf9ff01711b94e369e)
+
+296.	[build]		jreed
+	Do not install the unittest libraries. At this time, they
+	are not useful without source tree (and they may or may
+	not have googletest support). Also, convert several makefiles
+	to build tests at "check" time and not build time.
+	(Trac #1091, git 2adf4a90ad79754d52126e7988769580d20501c3)
+
+295.	[bug]		jinmei
+	__init__.py for isc.dns was installed in the wrong directory,
+	which would now make xfrin fail to start.  It was also bad
+	in that it replaced any existing __init__.py in the public
+	site-packages directory.  After applying this fix, you may want to
+	check if the wrong init file is in the wrong place, in which
+	case it should be removed.
+	(Trac #1285, git af3b17472694f58b3d6a56d0baf64601b0f6a6a1)
+
+294.	[func]		jelte, jinmei, vorner
+	b10-xfrin now supports incoming IXFR.  See BIND 10 Guide for
+	how to configure it and operational notes.
+	(Trac #1212, multiple git merges)
+
+293.	[func]*		tomek
+	b10-dhcp6: Implemented DHCPv6 echo server. It joins DHCPv6
+	multicast groups and listens to incoming DHCPv6 client messages.
+	Received messages are then echoed back to clients. This
+	functionality is limited, but it can be used to test out client
+	resiliency to unexpected messages. Note that network interface
+	detection routines are not implemented yet, so interface name
+	and its address must be specified in interfaces.txt.
+	(Trac #878, git 3b1a604abf5709bfda7271fa94213f7d823de69d)
+
+292.	[func]		dvv
+	Implement the DLV rrtype according to RFC4431.
+	(Trac #1144, git d267c0511a07c41cd92e3b0b9ee9bf693743a7cf)
+
+291.	[func]		naokikambe
+	Statistics items are specified by each module's spec file.
+	Stats module can read these through the config manager. Stats
+	module and stats httpd report statistics data and statistics
+	schema by each module via both bindctl and HTTP/XML.
+	(Trac #928,#929,#930,#1175,
+	git 054699635affd9c9ecbe7a108d880829f3ba229e)
+
+290.	[func]		jinmei
+	libdns++/pydnspp: added an option parameter to the "from wire"
+	methods of the Message class.  One option is defined,
+	PRESERVE_ORDER, which specifies the parser to handle each RR
+	separately, preserving the order, and constructs RRsets in the
+	message sections so that each RRset contains only one RR.
+	(Trac #1258, git c874cb056e2a5e656165f3c160e1b34ccfe8b302)
+
+289.	[func]*		jinmei
+	b10-xfrout: ACLs for xfrout can now be configured per zone basis.
+	A per zone ACL is part of a more general zone configuration.  A
+	quick example for configuring an ACL for zone "example.com" that
+	rejects any transfer request for that zone is as follows:
+	> config add Xfrout/zone_config
+	> config set Xfrout/zone_config[0]/origin "example.com"
+	> config add Xfrout/zone_config[0]/transfer_acl
+	> config set Xfrout/zone_config[0]/transfer_acl[0] {"action": "REJECT"}
+	The previous global ACL (query_acl) was renamed to transfer_acl,
+	which now works as the default ACL.  Note: backward compatibility
+	is not provided, so an existing configuration using query_acl
+	needs to be updated by hand.
+	Note: the per zone configuration framework is a temporary
+	workaround.  It will eventually be redesigned as a system wide
+	configuration.
+	(Trac #1165, git 698176eccd5d55759fe9448b2c249717c932ac31)
+
+288.	[bug]		stephen
+	Fixed problem whereby the order in which component files appeared in
+	rdataclass.cc was system dependent, leading to problems on some
+	systems where data types were used before the header file in which
+	they were declared was included.
+	(Trac #1202, git 4a605525cda67bea8c43ca8b3eae6e6749797450)
+
+287.	[bug]*		jinmei
+	Python script files for log messages (xxx_messages.py) should have
+	been installed under the "isc" package.  This fix itself should
+	be a transparent change without affecting existing configurations
+	or other operational practices, but you may want to clean up the
+	python files from the common directory (such as "site-packages").
+	(Trac #1101, git 0eb576518f81c3758c7dbaa2522bd8302b1836b3)
+
+286.	[func]		ocean
+	libdns++: Implement the HINFO rrtype support according to RFC1034,
+	and RFC1035.
+	(Trac #1112, git 12d62d54d33fbb1572a1aa3089b0d547d02924aa)
+
+285.	[bug]		jelte
+	sqlite3 data source: fixed a race condition on initial startup,
+	when the database has not been initialized yet, and multiple
+	processes are trying to do so, resulting in one of them failing.
+	(Trac #326, git 5de6f9658f745e05361242042afd518b444d7466)
+
+284.	[bug]		jerry
+	b10-zonemgr: zonemgr will not terminate on empty zones, it will
+	log a warning and try to do zone transfer for them.
+	(Trac #1153, git 0a39659638fc68f60b95b102968d7d0ad75443ea)
+
+283.	[bug]		zhanglikun
+	Make stats and boss processes wait for answer messages from each
+	other in block mode to avoid orphan answer messages, add an internal
+	command "getstats" to boss process for getting statistics data from
+	boss.
+	(Trac #519, git 67d8e93028e014f644868fede3570abb28e5fb43)
+
+282.	[func]		ocean
+	libdns++: Implement the NAPTR rrtype according to RFC2915,
+	RFC2168 and RFC3403.
+	(Trac #1130, git 01d8d0f13289ecdf9996d6d5d26ac0d43e30549c)
+
+bind10-devel-20110819 released on August 19, 2011
+
+281.	[func]		jelte
+	Added a new type for configuration data: "named set". This allows for
+	similar configuration as the current "list" type, but with strings
+	instead of indices as identifiers. The intended use is for instance
+	/foo/zones/example.org/bar instead of /foo/zones[2]/bar. Currently
+	this new type is not in use yet.
+	(Trac #926, git 06aeefc4787c82db7f5443651f099c5af47bd4d6)
+
+280.	[func]		jerry
+	libdns++: Implement the MINFO rrtype according to RFC1035.
+	(Trac #1113, git 7a9a19d6431df02d48a7bc9de44f08d9450d3a37)
+
+279.	[func]		jerry
+	libdns++: Implement the AFSDB rrtype according to RFC1183.
+	(Trac #1114, git ce052cd92cd128ea3db5a8f154bd151956c2920c)
+
+278.	[doc]		jelte
+	Add logging configuration documentation to the guide.
+	(Trac #1011, git 2cc500af0929c1f268aeb6f8480bc428af70f4c4)
+
+277.	[func]		jerry
+	libdns++: Implement the SRV rrtype according to RFC2782.
+	(Trac #1128, git 5fd94aa027828c50e63ae1073d9d6708e0a9c223)
+
+276.	[func]		stephen
+	Although the top-level loggers are named after the program (e.g.
+	b10-auth, b10-resolver), allow the logger configuration to omit the
+	"b10-" prefix and use just the module name.
+	(Trac #1003, git a01cd4ac5a68a1749593600c0f338620511cae2d)
+
+275.	[func]		jinmei
+	Added support for TSIG key matching in ACLs.  The xfrout ACL can
+	now refer to TSIG key names using the "key" attribute.  For
+	example, the following specifies an ACL that allows zone transfer
+	if and only if the request is signed with a TSIG of a key name
+	"key.example":
+	> config set Xfrout/query_acl[0] {"action": "ACCEPT", \
+	                                  "key": "key.example"}
+	(Trac #1104, git 9b2e89cabb6191db86f88ee717f7abc4171fa979)
+
 274.	[bug]		naokikambe
 	add unittests for functions xml_handler, xsd_handler and xsl_handler
 	respectively to make sure their behaviors are correct, regardless of
@@ -11,7 +476,7 @@ TBD.	[func]		y-aharen
 	returns is str or byte.
 	(Trac #1021, git 486bf91e0ecc5fbecfe637e1e75ebe373d42509b)
 
-273.    [func]		vorner
+273.	[func]		vorner
 	It is possible to specify ACL for the xfrout module. It is in the ACL
 	configuration key and has the usual ACL syntax. It currently supports
 	only the source address. Default ACL accepts everything.
@@ -190,7 +655,7 @@ bind10-devel-20110705 released on July 05, 2011
 	(Trac #542, git 1aa773d84cd6431aa1483eb34a7f4204949a610f)
 
 243.	[func]*		feng
-	Add optional hmac algorithm SHA224/384/812.
+	Add optional hmac algorithm SHA224/384/512.
 	(Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
 
 bind10-devel-20110519 released on May 19, 2011
diff --git a/Makefile.am b/Makefile.am
index b07ef0f..cc91a56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,13 +1,17 @@
-SUBDIRS = doc src tests
+SUBDIRS = compatcheck doc src tests
 USE_LCOV=@USE_LCOV@
 LCOV=@LCOV@
 GENHTML=@GENHTML@
+DISTCHECK_GTEST_CONFIGURE_FLAG=@DISTCHECK_GTEST_CONFIGURE_FLAG@
 
 DISTCLEANFILES = config.report
 
 # When running distcheck target, do not install the configurations
 DISTCHECK_CONFIGURE_FLAGS = --disable-install-configurations
 
+# Use same --with-gtest flag if set
+DISTCHECK_CONFIGURE_FLAGS += $(DISTCHECK_GTEST_CONFIGURE_FLAG)
+
 clean-cpp-coverage:
 	@if [ $(USE_LCOV) = yes ] ; then \
 		$(LCOV) --directory . --zerocounters; \
diff --git a/README b/README
index a6509da..99e2ece 100644
--- a/README
+++ b/README
@@ -1,3 +1,4 @@
+
 This is the source for the development version of BIND 10.
 
 BIND is the popular implementation of a DNS server, developed
@@ -8,10 +9,10 @@ for serving, maintaining, and developing DNS.
 BIND10-devel is new development leading up to the production
 BIND 10 release. It contains prototype code and experimental
 interfaces. Nevertheless it is ready to use now for testing the
-new BIND 10 infrastructure ideas. The Year 2 milestones of the
-five year plan are described here:
+new BIND 10 infrastructure ideas. The Year 3 goals of the five
+year plan are described here:
 
-	https://bind10.isc.org/wiki/Year2Milestones
+        http://bind10.isc.org/wiki/Year3Goals
 
 This release includes the bind10 master process, b10-msgq message
 bus, b10-auth authoritative DNS server (with SQLite3 and in-memory
@@ -21,12 +22,15 @@ AXFR inbound service, b10-xfrout outgoing AXFR service, b10-zonemgr
 secondary manager, b10-stats statistics collection and reporting
 daemon, b10-stats-httpd for HTTP access to XML-formatted stats,
 b10-host DNS lookup utility, and a new libdns++ library for C++
-with a python wrapper.
+with a python wrapper. BIND 10 also provides an experimental DHCPv6
+echo server, b10-dhcp6.
 
-Documentation is included and also available via the BIND 10
-website at http://bind10.isc.org/
+Documentation is included with the source. See doc/guide/bind10-guide.txt
+(or bind10-guide.html) for installation instructions.  The
+documentation is also available via the BIND 10 website at
+http://bind10.isc.org/
 
-The latest released source may be downloaded from:
+The latest released source tar file may be downloaded from:
 
         ftp://ftp.isc.org/isc/bind10/
 
@@ -40,15 +44,11 @@ Bugs may be reported as tickets via the developers website:
 
         http://bind10.isc.org/
 
-BUILDING
-
-See the Guide for detailed installation directions at
-doc/guide/bind10-guide.html.
-
-Simple build instructions:
+Simple build and installation instructions:
 
   ./configure
   make
+  make install
 
 If building from Git repository, run:
 
@@ -56,197 +56,11 @@ If building from Git repository, run:
 
 before running ./configure
 
-Requires autoconf 2.59 or newer.
-
-Use automake-1.11 or better for working Python 3.1 tests.
-Alternatively, you could manually specify an absolute path to python
-executable by the --with-pythonpath option of the configure script,
-e.g.,
-% ./configure --with-pythonpath=/usr/local/bin/python3.1
-
-Operating-System specific tips:
-
-- FreeBSD
-  You may need to install a python binding for sqlite3 by hand.  A
-  sample procedure is as follows:
-  - add the following to /etc/make.conf
-    PYTHON_VERSION=3.1
-  - build and install the python binding from ports, assuming the top
-    directory of the ports system is /usr/ports
-  % cd /usr/ports/databases/py-sqlite3/
-  % make
-  % sudo make install
-
-INSTALLATION
+See the Guide for detailed installation directions at
+doc/guide/bind10-guide.txt.
 
-Install with:
+For operating system specific tips see the wiki at:
 
-  make install
+       http://bind10.isc.org/wiki/SystemSpecificNotes
 
-TESTS
-
-The tests use the googletests framework for C++. It is available
-from http://code.google.com/p/googletest/.  To enable the tests,
-configure BIND 10 with: 
-
-  ./configure --with-gtest
-
-Then run "make check" to run these tests.
-
-TEST COVERAGE
-
-Code coverage reports may be generated using make. These are
-based on running on the unit tests. The resulting reports are placed
-in coverage-cpp-html and coverage-python-html directories for C++
-and Python, respectively.
-
-The code coverage report for the C++ tests uses LCOV. It is available
-from http://ltp.sourceforge.net/. To generate the HTML report,
-first configure BIND 10 with:
- 
-  ./configure --with-lcov
-
-The code coverage report for the Python tests uses coverage.py (aka
-pycoverage). It is available from http://nedbatchelder.com/code/coverage/.
-To generate the HTML report, first configure BIND 10 with:
-
-  ./configure --with-pycoverage
-
-Doing code coverage tests:
-
-  make coverage
-	Does the clean, perform, and report targets for C++ and Python.
-
-  make clean-coverage
-	Zeroes the code coverage counters and removes the HTML reports
-	for C++ and Python.
-
-  make perform-coverage
-	Runs the C++ (using the googletests framework) and Python
-	tests.
-
-  make report-coverage
-	Generates the coverage reports in HTML for C++ and Python.
-
-  make clean-cpp-coverage
-	Zeroes the code coverage counters and removes the HTML report
-	for the C++ tests.
-
-  make clean-python-coverage
-	Zeroes the code coverage counters and removes the HTML report
-	for the Python tests.
-
-  make report-cpp-coverage
-	Generates the coverage report in HTML for C++, excluding
-	some unrelated headers.  The HTML reports are placed in a
-	directory called coverage-cpp-html/.
-
-  make report-python-coverage
-	Generates the coverage report in HTML for Python. The HTML
-	reports are placed in a directory called coverage-python-html/.
-
-DEVELOPERS
-
-The generated run_*.sh scripts available in the src/bin directories
-are for running the code using the source tree.
-
-RUNNING
-
-You can start the BIND 10 processes by running bind10 which is
-installed to the sbin directory under the installation prefix.
-The default location is:
-
-  /usr/local/sbin/bind10
-
-For development work, you can also run the bind10 services from the
-source tree:
-
- ./src/bin/bind10/run_bind10.sh 
-
-(Which will use the modules and configurations also from the source
-tree.)
-
-CONFIGURATION
-
-Commands can be given through the bindctl tool.
-
-The server must be running for bindctl to work.
-
-The following configuration commands are available
-
-help: show the different command modules
-<module> help: show the commands for module
-<module> <command> help: show info for the command
-
-
-config show [identifier]: Show the currently set values. If no identifier is
-                          given, the current location is used. If a config
-                          option is a list or a map, the value is not
-                          shown directly, but must be requested separately.
-config go [identifier]:   Go to the given location within the configuration.
-config set [identifier] <value>: Set a configuration value.
-config unset [identifier]: Remove a value (reverts to default if the option
-                           is mandatory).
-config add [identifier] <value>: add a value to a list
-config remove [identifier] <value>: remove a value from a list 
-config revert:	Revert all changes that have not been committed
-config commit: Commit all changes
-config diff: Show the changes that have not been committed yet
-
-
-EXAMPLE SESSION
-
-~> bindctl
-["login success "] login as root
-> help
-BindCtl, verstion 0.1
-usage: <module name> <command name> [param1 = value1 [, param2 = value2]]
-Type Tab character to get the hint of module/command/paramters.
-Type "help(? h)" for help on bindctl.
-Type "<module_name> help" for help on the specific module.
-Type "<module_name> <command_name> help" for help on the specific command.
-
-Available module names: 
-	 help 	Get help for bindctl
-	 config 	Configuration commands
-	 Xfrin 	same here
-	 Auth 	same here
-	 Boss 	same here
-> config help
-Module  config 	Configuration commands 
-Available commands:
-	 help 	(Get help for module)
-	 show 	(Show configuration)
-	 add 	(Add entry to configuration list)
-	 remove 	(Remove entry from configuration list)
-	 set 	(Set a configuration value)
-	 unset 	(Unset a configuration value)
-	 diff 	(Show all local changes)
-	 revert 	(Revert all local changes)
-	 commit 	(Commit all local changes)
-	 go 	(Go to a specific configuration part)
-> config show
-Xfrin/	module	
-Auth/	module	
-Boss/	module	
-> config show Xfrin
-transfers_in:	10	integer	
-> config go Auth
-/Auth> config show
-database_file:	None	string	
-/Auth> config set database_file /tmp/bind10_zones.db
-/Auth> config commit
-/Auth> config go /
-> config show Auth/
-database_file:	/tmp/bind10_zones.db	string	
-> config diff
-{}
-> config set Auth/foobar
-Error: missing identifier or value
-> config set Auth/database_file foobar
-> config diff
-{'Auth': {'database_file': 'foobar'}}
-> config revert
-> config diff
-{}
-> quit
+Please see the wiki and the doc/ directory for various documentation.
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
new file mode 100644
index 0000000..029578d
--- /dev/null
+++ b/compatcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_SCRIPTS = sqlite3-difftbl-check.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a near future version.
+install-data-local:
+	$(PYTHON) sqlite3-difftbl-check.py \
+	$(localstatedir)/$(PACKAGE)/zone.sqlite3
diff --git a/compatcheck/README b/compatcheck/README
new file mode 100644
index 0000000..8381e60
--- /dev/null
+++ b/compatcheck/README
@@ -0,0 +1,5 @@
+This directory is a collection of compatibility checker programs.
+They will be run before any other installation attempts on 'make install'
+to see if the installation causes any substantial compatibility problems
+with existing configurations.  If any checker program finds an issue,
+'make install' will stop at that point.
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
new file mode 100755
index 0000000..e3b7b91
--- /dev/null
+++ b/compatcheck/sqlite3-difftbl-check.py.in
@@ -0,0 +1,60 @@
+#!@PYTHON@
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, sqlite3, sys
+from optparse import OptionParser
+
+usage = 'usage: %prog [options] db_file'
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--upgrade", action="store_true",
+                  dest="upgrade", default=False,
+                  help="Upgrade the database file [default: %default]")
+(options, args) = parser.parse_args()
+if len(args) == 0:
+    parser.error('missing argument')
+
+db_file = args[0]
+
+# If the file doesn't exist, there's nothing to do
+if not os.path.exists(db_file):
+    sys.exit(0)
+
+conn = sqlite3.connect(db_file)
+cur = conn.cursor()
+try:
+    # This can be anything that works iff the "diffs" table exists
+    cur.execute('SELECT name FROM diffs DESC LIMIT 1')
+except sqlite3.OperationalError as ex:
+    # If it fails with 'no such table', create a new one or fail with
+    # warning depending on the --upgrade command line option.
+    if str(ex) == 'no such table: diffs':
+        if options.upgrade:
+            cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
+                        'zone_id INTEGER NOT NULL, ' +
+                        'version INTEGER NOT NULL, ' +
+                        'operation INTEGER NOT NULL, ' +
+                        'name STRING NOT NULL COLLATE NOCASE, ' +
+                        'rrtype STRING NOT NULL COLLATE NOCASE, ' +
+                        'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
+        else:
+            sys.stdout.write('Found an older version of SQLite3 DB file: ' +
+                             db_file + '\n' + "Perform '" + os.getcwd() +
+                             "/sqlite3-difftbl-check.py --upgrade " +
+                             db_file + "'\n" +
+                             'before continuing install.\n')
+            sys.exit(1)
+conn.close()
diff --git a/configure.ac b/configure.ac
index 261cf37..a5f8e87 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20110519, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20111129, bind10-dev at isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE
 AC_CONFIG_HEADERS([config.h])
@@ -12,6 +12,12 @@ AC_PROG_CXX
 
 # Libtool configuration
 #
+
+# libtool cannot handle spaces in paths, so exit early if there is one
+if [ test `echo $PWD | grep -c ' '` != "0"  ]; then
+    AC_MSG_ERROR([BIND 10 cannot be built in a directory that contains spaces, because of libtool limitations. Please change the directory name, or use a symbolic link that does not contain spaces.])
+fi
+
 # On FreeBSD (and probably some others), clang++ does not meet an autoconf
 # assumption in identifying libtool configuration regarding shared library:
 # the configure script will execute "$CC -shared $CFLAGS/$CXXFLAGS -v" and
@@ -90,6 +96,8 @@ case "$host" in
 	# Solaris requires special definitions to get some standard libraries
 	# (e.g. getopt(3)) available with common used header files.
 	CPPFLAGS="$CPPFLAGS -D_XPG4_2 -D__EXTENSIONS__"
+	# "now" binding is necessary to prevent deadlocks in C++ static initialization code
+	LDFLAGS="$LDFLAGS -z now"
 	;;
 *-apple-darwin*)
 	# libtool doesn't work perfectly with Darwin: libtool embeds the
@@ -101,6 +109,12 @@ case "$host" in
 	SET_ENV_LIBRARY_PATH=yes
 	ENV_LIBRARY_PATH=DYLD_LIBRARY_PATH
 	;;
+*-freebsd*)
+	SET_ENV_LIBRARY_PATH=yes
+	;;
+*-netbsd*)
+	SET_ENV_LIBRARY_PATH=yes
+	;;
 esac
 AM_CONDITIONAL(SET_ENV_LIBRARY_PATH, test $SET_ENV_LIBRARY_PATH = yes)
 AC_SUBST(SET_ENV_LIBRARY_PATH)
@@ -149,6 +163,16 @@ fi
 PYTHON_SITEPKG_DIR=${pyexecdir}
 AC_SUBST(PYTHON_SITEPKG_DIR)
 
+# This will be commonly used in various Makefile.am's that need to generate
+# python log messages.
+PYTHON_LOGMSGPKG_DIR="\$(top_builddir)/src/lib/python/isc/log_messages"
+AC_SUBST(PYTHON_LOGMSGPKG_DIR)
+
+# This is python package paths commonly used in python tests.  See
+# README of log_messages for why it's included.
+COMMON_PYTHON_PATH="\$(abs_top_builddir)/src/lib/python/isc/log_messages:\$(abs_top_srcdir)/src/lib/python:\$(abs_top_builddir)/src/lib/python"
+AC_SUBST(COMMON_PYTHON_PATH)
+
 # Check for python development environments
 if test -x ${PYTHON}-config; then
 	PYTHON_INCLUDES=`${PYTHON}-config --includes`
@@ -270,6 +294,8 @@ B10_CXXFLAGS="-Wall -Wextra -Wwrite-strings -Woverloaded-virtual -Wno-sign-compa
 case "$host" in
 *-solaris*)
 	MULTITHREADING_FLAG=-pthreads
+	# In Solaris, IN6ADDR_ANY_INIT and IN6ADDR_LOOPBACK_INIT need -Wno-missing-braces
+	B10_CXXFLAGS="$B10_CXXFLAGS -Wno-missing-braces"
 	;;
 *)
 	MULTITHREADING_FLAG=-pthread
@@ -419,7 +445,7 @@ AC_ARG_WITH([botan],
   AC_HELP_STRING([--with-botan=PATH],
     [specify exact directory of Botan library]),
     [botan_path="$withval"])
-if test "${botan_path}" == "no" ; then
+if test "${botan_path}" = "no" ; then
     AC_MSG_ERROR([Need botan for libcryptolink])
 fi
 if test "${botan_path}" != "yes" ; then
@@ -429,42 +455,65 @@ if test "${botan_path}" != "yes" ; then
         AC_MSG_ERROR([${botan_path}/bin/botan-config not found])
     fi
 else
+    # First see if pkg-config knows of it.
+    # Unfortunately, the botan.pc files have their minor version in them
+    # too, so we need to try them one by one
+    BOTAN_CONFIG=""
+    AC_PATH_PROG([PKG_CONFIG], [pkg-config])
+    if test "$PKG_CONFIG" != "" ; then
+        BOTAN_VERSIONS="botan-1.10 botan-1.9 botan-1.8"
+        for version in $BOTAN_VERSIONS; do
+            AC_MSG_CHECKING([Checking botan version with pkg-config $version])
+            
+            if [ $PKG_CONFIG --exists ${version} ]; then
+                AC_MSG_RESULT([found])
+                BOTAN_CONFIG="$PKG_CONFIG ${version}"
+                break
+            else
+                AC_MSG_RESULT([not found])
+            fi
+        done
+    fi
+    # If we had no pkg-config, or it didn't know about botan, use botan-config
+    if test "$BOTAN_CONFIG" = "" ; then
         AC_PATH_PROG([BOTAN_CONFIG], [botan-config])
+    fi
 fi
 
-if test -x "${BOTAN_CONFIG}" ; then
-    BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
-    # We expect botan-config --libs to contain -L<path_to_libbotan>, but
-    # this is not always the case.  As a heuristics workaround we add
-    # -L`botan-config --prefix/lib` in this case.  Same for BOTAN_INCLUDES
-    # (but using include instead of lib) below.
+BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+
+# We expect botan-config --libs to contain -L<path_to_libbotan>, but
+# this is not always the case.  As a heuristics workaround we add
+# -L`botan-config --prefix/lib` in this case.  Same for BOTAN_INCLUDES
+# (but using include instead of lib) below.
+if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
     echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
-	    BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
-    BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
+        BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
     echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
-	    BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
-    # See python_rpath for some info on why we do this
-    if test $rpath_available = yes; then
-        BOTAN_RPATH=
-        for flag in ${BOTAN_LDFLAGS}; do
-                BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
-        done
-	AC_SUBST(BOTAN_RPATH)
-
-	# According to the libtool manual, it should be sufficient if we
-	# specify the "-R libdir" in our wrapper library of botan (no other
-	# programs will need libbotan directly); "libdir" should be added to
-	# the program's binary image.  But we've seen in our build environments
-	# that (some versions of?) libtool doesn't propagate -R as documented,
-	# and it caused a linker error at run time.  To work around this, we
-	# also add the rpath to the global LDFLAGS.
-        LDFLAGS="$BOTAN_RPATH $LDFLAGS"
-    fi
-
-    AC_SUBST(BOTAN_LDFLAGS)
-    AC_SUBST(BOTAN_INCLUDES)
+        BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
+fi
+# See python_rpath for some info on why we do this
+if test $rpath_available = yes; then
+    BOTAN_RPATH=
+    for flag in ${BOTAN_LDFLAGS}; do
+            BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
+    done
+AC_SUBST(BOTAN_RPATH)
+
+# According to the libtool manual, it should be sufficient if we
+# specify the "-R libdir" in our wrapper library of botan (no other
+# programs will need libbotan directly); "libdir" should be added to
+# the program's binary image.  But we've seen in our build environments
+# that (some versions of?) libtool doesn't propagate -R as documented,
+# and it caused a linker error at run time.  To work around this, we
+# also add the rpath to the global LDFLAGS.
+    LDFLAGS="$BOTAN_RPATH $LDFLAGS"
 fi
 
+AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_INCLUDES)
+
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
 LDFLAGS_SAVED="$LDFLAGS"
@@ -492,7 +541,7 @@ AC_ARG_WITH([log4cplus],
   AC_HELP_STRING([--with-log4cplus=PATH],
     [specify exact directory of log4cplus library and headers]),
     [log4cplus_path="$withval"])
-if test "${log4cplus_path}" == "no" ; then
+if test "${log4cplus_path}" = "no" ; then
     AC_MSG_ERROR([Need log4cplus])
 elif test "${log4cplus_path}" != "yes" ; then
   LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
@@ -632,6 +681,7 @@ fi
 #
 if test "$gtest_path" != "no"
 then
+	DISTCHECK_GTEST_CONFIGURE_FLAG="--with-gtest=\"$gtest_path\""
 	if test "$gtest_path" != "yes"; then
 		GTEST_PATHS=$gtest_path
 		if test -x "${gtest_path}/bin/gtest-config" ; then
@@ -672,8 +722,10 @@ else
 	GTEST_INCLUDES=
 	GTEST_LDFLAGS=
 	GTEST_LDADD=
+	DISTCHECK_GTEST_CONFIGURE_FLAG=
 fi
 AM_CONDITIONAL(HAVE_GTEST, test $gtest_path != "no")
+AC_SUBST(DISTCHECK_GTEST_CONFIGURE_FLAG)
 AC_SUBST(GTEST_INCLUDES)
 AC_SUBST(GTEST_LDFLAGS)
 AC_SUBST(GTEST_LDADD)
@@ -749,6 +801,8 @@ fi
 #
 AC_PATH_PROGS(PERL, perl5 perl)
 AC_SUBST(PERL)
+AC_PATH_PROGS(AWK, gawk awk)
+AC_SUBST(AWK)
 
 AC_ARG_ENABLE(man, [AC_HELP_STRING([--enable-man],
   [regenerate man pages [default=no]])], enable_man=yes, enable_man=no)
@@ -764,6 +818,7 @@ AM_CONDITIONAL(INSTALL_CONFIGURATIONS, test x$install_configurations = xyes || t
 AC_CONFIG_FILES([Makefile
                  doc/Makefile
                  doc/guide/Makefile
+                 compatcheck/Makefile
                  src/Makefile
                  src/bin/Makefile
                  src/bin/bind10/Makefile
@@ -793,19 +848,13 @@ AC_CONFIG_FILES([Makefile
                  src/bin/sockcreator/tests/Makefile
                  src/bin/xfrin/Makefile
                  src/bin/xfrin/tests/Makefile
+                 src/bin/xfrin/tests/testdata/Makefile
                  src/bin/xfrout/Makefile
                  src/bin/xfrout/tests/Makefile
                  src/bin/zonemgr/Makefile
                  src/bin/zonemgr/tests/Makefile
                  src/bin/stats/Makefile
                  src/bin/stats/tests/Makefile
-                 src/bin/stats/tests/isc/Makefile
-                 src/bin/stats/tests/isc/cc/Makefile
-                 src/bin/stats/tests/isc/config/Makefile
-                 src/bin/stats/tests/isc/util/Makefile
-                 src/bin/stats/tests/isc/log/Makefile
-                 src/bin/stats/tests/testdata/Makefile
-                 src/bin/stats/tests/http/Makefile
                  src/bin/usermgr/Makefile
                  src/bin/tests/Makefile
                  src/lib/Makefile
@@ -826,17 +875,24 @@ AC_CONFIG_FILES([Makefile
                  src/lib/python/isc/util/tests/Makefile
                  src/lib/python/isc/datasrc/Makefile
                  src/lib/python/isc/datasrc/tests/Makefile
+                 src/lib/python/isc/dns/Makefile
                  src/lib/python/isc/cc/Makefile
                  src/lib/python/isc/cc/tests/Makefile
                  src/lib/python/isc/config/Makefile
                  src/lib/python/isc/config/tests/Makefile
                  src/lib/python/isc/log/Makefile
                  src/lib/python/isc/log/tests/Makefile
+                 src/lib/python/isc/log_messages/Makefile
+                 src/lib/python/isc/log_messages/work/Makefile
                  src/lib/python/isc/net/Makefile
                  src/lib/python/isc/net/tests/Makefile
                  src/lib/python/isc/notify/Makefile
                  src/lib/python/isc/notify/tests/Makefile
                  src/lib/python/isc/testutils/Makefile
+                 src/lib/python/isc/bind10/Makefile
+                 src/lib/python/isc/bind10/tests/Makefile
+                 src/lib/python/isc/xfrin/Makefile
+                 src/lib/python/isc/xfrin/tests/Makefile
                  src/lib/config/Makefile
                  src/lib/config/tests/Makefile
                  src/lib/config/tests/testdata/Makefile
@@ -848,10 +904,13 @@ AC_CONFIG_FILES([Makefile
                  src/lib/dns/python/Makefile
                  src/lib/dns/python/tests/Makefile
                  src/lib/dns/benchmarks/Makefile
+                 src/lib/dhcp/Makefile
+                 src/lib/dhcp/tests/Makefile
                  src/lib/exceptions/Makefile
                  src/lib/exceptions/tests/Makefile
                  src/lib/datasrc/Makefile
                  src/lib/datasrc/tests/Makefile
+                 src/lib/datasrc/tests/testdata/Makefile
                  src/lib/xfr/Makefile
                  src/lib/log/Makefile
                  src/lib/log/compiler/Makefile
@@ -869,6 +928,7 @@ AC_CONFIG_FILES([Makefile
                  src/lib/util/Makefile
                  src/lib/util/io/Makefile
                  src/lib/util/unittests/Makefile
+                 src/lib/util/python/Makefile
                  src/lib/util/pyunittests/Makefile
                  src/lib/util/tests/Makefile
                  src/lib/acl/Makefile
@@ -882,6 +942,7 @@ AC_CONFIG_FILES([Makefile
                  tests/tools/badpacket/tests/Makefile
                ])
 AC_OUTPUT([doc/version.ent
+           compatcheck/sqlite3-difftbl-check.py
            src/bin/cfgmgr/b10-cfgmgr.py
            src/bin/cfgmgr/tests/b10-cfgmgr_test.py
            src/bin/cmdctl/cmdctl.py
@@ -904,7 +965,7 @@ AC_OUTPUT([doc/version.ent
            src/bin/zonemgr/run_b10-zonemgr.sh
            src/bin/stats/stats.py
            src/bin/stats/stats_httpd.py
-           src/bin/bind10/bind10.py
+           src/bin/bind10/bind10_src.py
            src/bin/bind10/run_bind10.sh
            src/bin/bind10/tests/bind10_test.py
            src/bin/bindctl/run_bindctl.sh
@@ -928,11 +989,12 @@ AC_OUTPUT([doc/version.ent
            src/lib/python/isc/cc/tests/cc_test
            src/lib/python/isc/notify/tests/notify_out_test
            src/lib/python/isc/log/tests/log_console.py
+           src/lib/python/isc/log_messages/work/__init__.py
            src/lib/dns/gen-rdatacode.py
            src/lib/python/bind10_config.py
-           src/lib/dns/tests/testdata/gen-wiredata.py
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
+           src/lib/datasrc/datasrc_config.h.pre
            src/lib/log/tests/console_test.sh
            src/lib/log/tests/destination_test.sh
            src/lib/log/tests/init_logger_test.sh
@@ -940,12 +1002,28 @@ AC_OUTPUT([doc/version.ent
            src/lib/log/tests/severity_test.sh
            src/lib/log/tests/tempdir.h
            src/lib/util/python/mkpywrapper.py
+           src/lib/util/python/gen_wiredata.py
            src/lib/server_common/tests/data_path.h
+           tests/lettuce/setup_intree_bind10.sh
            tests/system/conf.sh
+           tests/system/run.sh
            tests/system/glue/setup.sh
            tests/system/glue/nsx1/b10-config.db
            tests/system/bindctl/nsx1/b10-config.db.template
+           tests/system/ixfr/db.example.n0
+           tests/system/ixfr/db.example.n2
+           tests/system/ixfr/db.example.n2.refresh
+           tests/system/ixfr/db.example.n4
+           tests/system/ixfr/db.example.n6
+           tests/system/ixfr/ixfr_init.sh
+           tests/system/ixfr/b10-config.db
+           tests/system/ixfr/common_tests.sh
+           tests/system/ixfr/in-1/setup.sh
+           tests/system/ixfr/in-2/setup.sh
+           tests/system/ixfr/in-3/setup.sh
+           tests/system/ixfr/in-4/setup.sh
           ], [
+           chmod +x compatcheck/sqlite3-difftbl-check.py
            chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
            chmod +x src/bin/xfrin/run_b10-xfrin.sh
            chmod +x src/bin/xfrout/run_b10-xfrout.sh
@@ -964,15 +1042,22 @@ AC_OUTPUT([doc/version.ent
            chmod +x src/bin/msgq/run_msgq.sh
            chmod +x src/bin/msgq/tests/msgq_test
            chmod +x src/lib/dns/gen-rdatacode.py
-           chmod +x src/lib/dns/tests/testdata/gen-wiredata.py
            chmod +x src/lib/log/tests/console_test.sh
            chmod +x src/lib/log/tests/destination_test.sh
            chmod +x src/lib/log/tests/init_logger_test.sh
            chmod +x src/lib/log/tests/local_file_test.sh
            chmod +x src/lib/log/tests/severity_test.sh
            chmod +x src/lib/util/python/mkpywrapper.py
+           chmod +x src/lib/util/python/gen_wiredata.py
            chmod +x src/lib/python/isc/log/tests/log_console.py
            chmod +x tests/system/conf.sh
+           chmod +x tests/system/run.sh
+           chmod +x tests/system/ixfr/ixfr_init.sh
+           chmod +x tests/system/ixfr/common_tests.sh
+           chmod +x tests/system/ixfr/in-1/setup.sh
+           chmod +x tests/system/ixfr/in-2/setup.sh
+           chmod +x tests/system/ixfr/in-3/setup.sh
+           chmod +x tests/system/ixfr/in-4/setup.sh
           ])
 AC_OUTPUT
 
diff --git a/doc/Doxyfile b/doc/Doxyfile
index ceb806f..ee5aaf8 100644
--- a/doc/Doxyfile
+++ b/doc/Doxyfile
@@ -568,13 +568,13 @@ WARN_LOGFILE           =
 # directories like "/usr/src/myproject". Separate the files or directories
 # with spaces.
 
-INPUT                  = ../src/lib/cc ../src/lib/config \
-    ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
+INPUT                  = ../src/lib/exceptions ../src/lib/cc \
+    ../src/lib/config ../src/lib/cryptolink ../src/lib/dns ../src/lib/datasrc \
     ../src/bin/auth ../src/bin/resolver ../src/lib/bench ../src/lib/log \
     ../src/lib/log/compiler ../src/lib/asiolink/ ../src/lib/nsas \
     ../src/lib/testutils ../src/lib/cache ../src/lib/server_common/ \
     ../src/bin/sockcreator/ ../src/lib/util/ \
-    ../src/lib/resolve ../src/lib/acl
+    ../src/lib/resolve ../src/lib/acl ../src/bin/dhcp6 ../src/lib/dhcp
 
 # This tag can be used to specify the character encoding of the source files
 # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
diff --git a/doc/guide/Makefile.am b/doc/guide/Makefile.am
index c84ad06..239f235 100644
--- a/doc/guide/Makefile.am
+++ b/doc/guide/Makefile.am
@@ -1,5 +1,5 @@
 EXTRA_DIST = bind10-guide.css
-EXTRA_DIST += bind10-guide.xml bind10-guide.html
+EXTRA_DIST += bind10-guide.xml bind10-guide.html bind10-guide.txt
 EXTRA_DIST += bind10-messages.xml bind10-messages.html
 
 # This is not a "man" manual, but reuse this for now for docbook.
@@ -15,6 +15,11 @@ bind10-guide.html: bind10-guide.xml
 		http://docbook.sourceforge.net/release/xsl/current/html/docbook.xsl \
 		$(srcdir)/bind10-guide.xml
 
+HTML2TXT = elinks -dump -no-numbering -no-references
+
+bind10-guide.txt: bind10-guide.html
+	$(HTML2TXT) $(srcdir)/bind10-guide.html > $@
+
 bind10-messages.html: bind10-messages.xml
 	xsltproc --novalid --xinclude --nonet \
 		--path $(top_builddir)/doc \
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 5754cf0..2972cdf 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,24 +1,26 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p class="releaseinfo">This is the referenc
 e guide for BIND 10 version
-        20110519.</p></div><div><p class="copyright">Copyright © 2010 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20111021. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
 lass="releaseinfo">This is the reference guide for BIND 10 version
+        20111021.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
 	Internet Systems Consortium (ISC). It includes DNS libraries
 	and modular components for controlling authoritative and
 	recursive DNS servers.
       </p><p>
-        This is the reference guide for BIND 10 version 20110519.
-	The most up-to-date version of this document, along with
-	other documents for BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.  </p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">In
 stallation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a hr
 ef="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server<
 /a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230299038">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168230299065">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+        This is the reference guide for BIND 10 version 20111021.
+	The most up-to-date version of this document (in PDF, HTML,
+	and plain text formats), along with other documents for
+	BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
+	</p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
 lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a><
 /span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Ma
 nually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><s
 pan class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="list-of-tables"><p><b>List of Tables</b></p><dl><dt>3.1. <a href="#id1168229437338"></a></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
       BIND is the popular implementation of a DNS server, developer
       interfaces, and DNS tools.
       BIND 10 is a rewrite of BIND 9.  BIND 10 is written in C++ and Python
       and provides a modular environment for serving and maintaining DNS.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         This guide covers the experimental prototype of
-        BIND 10 version 20110519.
+        BIND 10 version 20111021.
       </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         BIND 10 provides a EDNS0- and DNSSEC-capable
         authoritative DNS server and a caching recursive name server
         which also provides forwarding.
-      </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299038"></a>Supported Platforms</h2></div></div></div><p>
+      </p></div><div class="section" title="Supported Platforms"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451238"></a>Supported Platforms</h2></div></div></div><p>
   BIND 10 builds have been tested on Debian GNU/Linux 5,
   Ubuntu 9.10, NetBSD 5, Solaris 10, FreeBSD 7 and 8, and CentOS
   Linux 5.3.
@@ -28,13 +30,15 @@
 
         It is planned for BIND 10 to build, install and run on
         Windows and standard Unix-type platforms.
-      </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230299065"></a>Required Software</h2></div></div></div><p>
+      </p></div><div class="section" title="Required Software"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229451265"></a>Required Software</h2></div></div></div><p>
         BIND 10 requires Python 3.1.  Later versions may work, but Python
         3.1 is the minimum version which will work.
       </p><p>
 	BIND 10 uses the Botan crypto library for C++. It requires
-	at least Botan version 1.8. To build BIND 10, install the
-	Botan libraries and development include headers.
+	at least Botan version 1.8.
+      </p><p>
+	BIND 10 uses the log4cplus C++ logging library. It requires
+	at least log4cplus version 1.0.3.
       </p><p>
 	The authoritative server requires SQLite 3.3.9 or newer.
 	The <span class="command"><strong>b10-xfrin</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>,
@@ -136,7 +140,10 @@
       and, of course, DNS. These include detailed developer
       documentation and code examples.
 
-    </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230284846">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168230285026">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168230285045">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168230285106">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168230285203">Build</a></span></dt><dt><span class="section"><a href="#id1168230285219">Install</a></span></dt><dt><span class="section"><a href="#id1168230285242">Install Hierarchy<
 /a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230284846"></a>Building Requirements</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+    </p></div><div class="chapter" title="Chapter 2. Installation"><div class="titlepage"><div><div><h2 class="title"><a name="installation"></a>Chapter 2. Installation</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span class="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy<
 /a></span></dt></dl></dd></dl></div><div class="section" title="Building Requirements"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229436567"></a>Building Requirements</h2></div></div></div><p>
+          In addition to the run-time requirements, building BIND 10
+          from source code requires various development include headers.
+        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
             Some operating systems have split their distribution packages into
             a run-time and a development package.  You will need to install
             the development package versions, which include header files and
@@ -147,6 +154,11 @@
   
   
         </p><p>
+	  To build BIND 10, also install the Botan (at least version
+	  1.8) and the log4cplus (at least version 1.0.3)
+          development include headers.
+        </p><p>
+
 	  The Python Library and Python _sqlite3 module are required to
           enable the Xfrout and Xfrin support.
         </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
@@ -156,7 +168,7 @@
           Building BIND 10 also requires a C++ compiler and
           standard development headers, make, and pkg-config.
           BIND 10 builds have been tested with GCC g++ 3.4.3, 4.1.2,
-          4.1.3, 4.2.1, 4.3.2, and 4.4.1.
+          4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++ 5.10.
         </p></div><div class="section" title="Quick start"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="quickstart"></a>Quick start</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
           This quickly covers the standard steps for installing
           and deploying BIND 10 as an authoritative name server using
@@ -192,14 +204,14 @@
         the Git code revision control system or as a downloadable
         tar file. It may also be available in pre-compiled ready-to-use
         packages from operating system vendors.
-      </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285026"></a>Download Tar File</h3></div></div></div><p>
+      </p><div class="section" title="Download Tar File"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436859"></a>Download Tar File</h3></div></div></div><p>
           Downloading a release tar file is the recommended method to
           obtain the source code.
         </p><p>
           The BIND 10 releases are available as tar file downloads from
           <a class="ulink" href="ftp://ftp.isc.org/isc/bind10/" target="_top">ftp://ftp.isc.org/isc/bind10/</a>.
           Periodic development snapshots may also be available.
-        </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285045"></a>Retrieve from Git</h3></div></div></div><p>
+        </p></div><div class="section" title="Retrieve from Git"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436878"></a>Retrieve from Git</h3></div></div></div><p>
           Downloading this "bleeding edge" code is recommended only for
           developers or advanced users.  Using development code in a production
           environment is not recommended.
@@ -233,7 +245,7 @@
           <span class="command"><strong>autoheader</strong></span>,
           <span class="command"><strong>automake</strong></span>,
           and related commands.
-        </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285106"></a>Configure before the build</h3></div></div></div><p>
+        </p></div><div class="section" title="Configure before the build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229436939"></a>Configure before the build</h3></div></div></div><p>
           BIND 10 uses the GNU Build System to discover build environment
           details.
           To generate the makefiles using the defaults, simply run:
@@ -242,7 +254,7 @@
           Run <span class="command"><strong>./configure</strong></span> with the <code class="option">--help</code>
           switch to view the different options. The commonly-used options are:
 
-          </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the the installation location (the
+          </p><div class="variablelist"><dl><dt><span class="term">--prefix</span></dt><dd>Define the installation location (the
                 default is <code class="filename">/usr/local/</code>).
               </dd><dt><span class="term">--with-boost-include</span></dt><dd>Define the path to find the Boost headers.
               </dd><dt><span class="term">--with-pythonpath</span></dt><dd>Define the path to Python 3.1 if it is not in the
@@ -264,16 +276,16 @@
         </p><p>
           If the configure fails, it may be due to missing or old
           dependencies.
-        </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285203"></a>Build</h3></div></div></div><p>
+        </p></div><div class="section" title="Build"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437037"></a>Build</h3></div></div></div><p>
     After the configure step is complete, to build the executables
     from the C++ code and prepare the Python scripts, run:
 
           </p><pre class="screen">$ <strong class="userinput"><code>make</code></strong></pre><p>
-        </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285219"></a>Install</h3></div></div></div><p>
+        </p></div><div class="section" title="Install"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437052"></a>Install</h3></div></div></div><p>
           To install the BIND 10 executables, support files,
           and documentation, run:
           </p><pre class="screen">$ <strong class="userinput"><code>make install</code></strong></pre><p>
-        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168230285242"></a>Install Hierarchy</h3></div></div></div><p>
+        </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>The install step may require superuser privileges.</p></div></div><div class="section" title="Install Hierarchy"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229437076"></a>Install Hierarchy</h3></div></div></div><p>
           The following is the layout of the complete BIND 10 installation:
           </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem">
                 <code class="filename">bin/</code> —
@@ -303,29 +315,32 @@
                 <code class="filename">var/bind10-devel/</code> —
                 data source and configuration databases.
               </li></ul></div><p>
-        </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
-      BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which 
+        </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></div><p>
+      BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
       starts up the required processes.
       <span class="command"><strong>bind10</strong></span>
-      will also restart processes that exit unexpectedly.
+      will also restart some processes that exit unexpectedly.
       This is the only command needed to start the BIND 10 system.
     </p><p>
       After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
-      <span class="command"><strong>bind10</strong></span> connects to it, 
+      <span class="command"><strong>bind10</strong></span> connects to it,
       runs the configuration manager, and reads its own configuration.
       Then it starts the other modules.
     </p><p>
-      The <span class="command"><strong>b10-msgq</strong></span> and <span class="command"><strong>b10-cfgmgr</strong></span>
+      The <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span> and
+      <span class="command"><strong>b10-cfgmgr</strong></span>
       services make up the core. The <span class="command"><strong>b10-msgq</strong></span> daemon
       provides the communication channel between every part of the system.
       The <span class="command"><strong>b10-cfgmgr</strong></span> daemon is always needed by every
       module, if only to send information about themselves somewhere,
       but more importantly to ask about their own settings, and
-      about other modules.
-      The <span class="command"><strong>bind10</strong></span> master process will also start up
+      about other modules. The <span class="command"><strong>b10-sockcreator</strong></span> will
+      allocate sockets for the rest of the system.
+    </p><p>
+      In its default configuration, the <span class="command"><strong>bind10</strong></span>
+      master process will also start up
       <span class="command"><strong>b10-cmdctl</strong></span> for admins to communicate with the
-      system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service or
-      <span class="command"><strong>b10-resolver</strong></span> for recursive name service,
+      system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service,
       <span class="command"><strong>b10-stats</strong></span> for statistics collection,
       <span class="command"><strong>b10-xfrin</strong></span> for inbound DNS zone transfers,
       <span class="command"><strong>b10-xfrout</strong></span> for outbound DNS zone transfers,
@@ -334,6 +349,111 @@
         To start the BIND 10 service, simply run <span class="command"><strong>bind10</strong></span>.
         Run it with the <code class="option">--verbose</code> switch to
         get additional debugging or diagnostic output.
+      </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+          If the setproctitle Python module is detected at start up,
+          the process names for the Python-based daemons will be renamed
+          to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
+          This is not needed on some operating systems.
+        </p></div></div><div class="section" title="Configuration of started processes"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="bind10.config"></a>Configuration of started processes</h2></div></div></div><p>
+        The processes to be started can be configured, with the exception
+        of the <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span>
+        and <span class="command"><strong>b10-cfgmgr</strong></span>.
+      </p><p>
+        The configuration is in the Boss/components section. Each element
+        represents one component, which is an abstraction of a process
+        (currently there's also one component which doesn't represent
+        a process). If you didn't want to transfer out at all (your server
+        is a slave only), you would just remove the corresponding component
+        from the set, like this, and the process would be stopped immediately
+        (and not started on the next startup):
+      </p><pre class="screen">> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+      </p><p>
+        To add a process to the set, let's say the resolver (which is not started
+        by default), you would do this:
+        </p><pre class="screen">> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+        Now, what it means. We add an entry called b10-resolver. It is both a
+        name used to reference this component in the configuration and the
+        name of the process to start. Then we set some parameters on how to
+        start it.
+      </p><p>
+        The special one is for components that need some kind of special care
+        during startup or shutdown. Unless specified, the component is started
+        in the usual way. This is the list of components that need to be started
+        in a special way, with the value of special used for them:
+        </p><div class="table"><a name="id1168229437338"></a><p class="title"><b>Table 3.1. </b></p><div class="table-contents"><table border="1"><colgroup><col align="left"><col align="left"><col align="left"></colgroup><thead><tr><th align="left">Component</th><th align="left">Special</th><th align="left">Description</th></tr></thead><tbody><tr><td align="left">b10-auth</td><td align="left">auth</td><td align="left">Authoritative server</td></tr><tr><td align="left">b10-resolver</td><td align="left">resolver</td><td align="left">The resolver</td></tr><tr><td align="left">b10-cmdctl</td><td align="left">cmdctl</td><td align="left">The command control (remote control interface)</td></tr><tr><td align="left">setuid</td><td align="left">setuid</td><td align="left">Virtual component, see below</td></tr></tbody></table></div></div><p><br class="table-break">
+      </p><p>
+	The kind specifies how a failure of the component should
+	be handled.  If it is set to <span class="quote">“<span class="quote">dispensable</span>”</span>
+	(the default unless you set something else), it will get
+	started again if it fails. If it is set to <span class="quote">“<span class="quote">needed</span>”</span>
+	and it fails at startup, the whole <span class="command"><strong>bind10</strong></span>
+	shuts down and exits with an error exit code. But if it fails
+	some time later, it is just started again. If you set it
+	to <span class="quote">“<span class="quote">core</span>”</span>, you indicate that the system is
+	not usable without the component and if such component
+	fails, the system shuts down no matter when the failure
+	happened.  This is the behaviour of the core components
+	(the ones you can't turn off), but you can declare any
+	other components as core as well if you wish (but you can
+	turn these off, they just can't fail).
+      </p><p>
+        The priority defines the order in which the components should start.
+        The ones with a higher number are started sooner than the ones with
+        a lower one. If you don't set it, 0 (zero) is used as the priority.
+      </p><p>
+        There are other parameters we didn't use in our example.
+	One of them is <span class="quote">“<span class="quote">address</span>”</span>. It is the address
+	used by the component on the <span class="command"><strong>b10-msgq</strong></span>
+	message bus. The special components already know their
+	address, but the usual ones don't. The address is by
+	convention the thing after <span class="emphasis"><em>b10-</em></span>, with
+	the first letter capitalized (e.g. <span class="command"><strong>b10-stats</strong></span>
+	would have <span class="quote">“<span class="quote">Stats</span>”</span> as its address).
+
+      </p><p>
+        The last one is process. It is the name of the process to be started.
+        It defaults to the name of the component if not set, but you can use
+        this to override it.
+      </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+          This system allows you to start the same component multiple times
+          (by including it in the configuration with different names, but the
+          same process setting). However, the rest of the system doesn't expect
+          such a situation, so it would probably not do what you want. Such
+          support is yet to be implemented.
+        </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+	  The configuration is quite powerful, but that includes
+	  a lot of space for mistakes. You could turn off the
+	  <span class="command"><strong>b10-cmdctl</strong></span>, but then you couldn't
+	  change it back the usual way, as it would require it to
+	  be running (you would have to find and edit the configuration
+	  directly).  Also, some modules might have dependencies
+	  -- <span class="command"><strong>b10-stats-httpd</strong></span> needs
+	  <span class="command"><strong>b10-stats</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>
+	  needs the <span class="command"><strong>b10-auth</strong></span> to be running, etc.
+
+
+
+        </p><p>
+          In short, you should think twice before disabling something here.
+        </p></div><p>
+	Now, to the mysterious setuid virtual component. If you
+	use the <span class="command"><strong>-u</strong></span> option to start the
+	<span class="command"><strong>bind10</strong></span> as root, but change the user
+	later, we need to start the <span class="command"><strong>b10-auth</strong></span> or
+	<span class="command"><strong>b10-resolver</strong></span> as root (until the socket
+	creator is finished). So we need to specify
+	the time when the switch from root to the given user happens
+	and that's what the setuid component is for. The switch is
+	done at the time the setuid component would be started, if
+	it was a process. The default configuration contains the
+	setuid component with priority 5, <span class="command"><strong>b10-auth</strong></span>
+	has 10 to be started before the switch and everything else
+	is without priority, so it is started after the switch.
       </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
         The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
         message routing daemon to communicate with other BIND 10 components.
@@ -490,12 +610,12 @@ shutdown
       the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
       channel) the configuration on to the specified module.
     </p><p>
-    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230285816">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168230285881">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168230285912">Loading Master Zones Files</a></span></dt></dl></div><p>
+    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
       It supports EDNS0 and DNSSEC. It supports IPv6.
       Normally it is started by the <span class="command"><strong>bind10</strong></span> master
       process.
-    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285816"></a>Server Configurations</h2></div></div></div><p>
+    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438007"></a>Server Configurations</h2></div></div></div><p>
         <span class="command"><strong>b10-auth</strong></span> is configured via the
         <span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
         The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -515,7 +635,7 @@ This may be a temporary setting until then.
         </p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
               </dd></dl></div><p>
 
-      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285881"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438072"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
         supports a SQLite3 data source backend and in-memory data source
         backend.
@@ -529,7 +649,7 @@ This may be a temporary setting until then.
         The default is <code class="filename">/usr/local/var/</code>.)
   This data file location may be changed by defining the
   <span class="quote">“<span class="quote">database_file</span>”</span> configuration.
-      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230285912"></a>Loading Master Zones Files</h2></div></div></div><p>
+      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438171"></a>Loading Master Zones Files</h2></div></div></div><p>
         RFC 1035 style DNS master zone files may imported
         into a BIND 10 data source by using the
         <span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -558,41 +678,119 @@ This may be a temporary setting until then.
         If you reload a zone already existing in the database,
         all records from that prior zone disappear and a whole new set
         appears.
-      </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><p>
+      </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
       Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
       process which is started by <span class="command"><strong>bind10</strong></span>.
-      When received, the zone is stored in the BIND 10
-      data store, and its records can be served by
+      When received, the zone is stored in the corresponding BIND 10
+      data source, and its records can be served by
       <span class="command"><strong>b10-auth</strong></span>.
       In combination with <span class="command"><strong>b10-zonemgr</strong></span> (for
       automated SOA checks), this allows the BIND 10 server to
       provide <span class="quote">“<span class="quote">secondary</span>”</span> service.
+    </p><p>
+      The <span class="command"><strong>b10-xfrin</strong></span> process supports both AXFR and
+      IXFR.  Due to some implementation limitations of the current
+      development release, however, it only tries AXFR by default,
+      and care should be taken to enable IXFR.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.) 
-
+     In the current development release of BIND 10, incoming zone
+     transfers are only available for SQLite3-based data sources,
+     that is, they don't work for an in-memory data source.
+    </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438302"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+	In practice, you need to specify a list of secondary zones to
+	enable incoming zone transfers for these zones (you can still
+	trigger a zone transfer manually, without a prior configuration
+	(see below)).
+      </p><p>
+	For example, to enable zone transfers for a zone named "example.com"
+	(whose master address is assumed to be 2001:db8::53 here),
+	run the following at the <span class="command"><strong>bindctl</strong></span> prompt:
 
+      </p><pre class="screen">> <strong class="userinput"><code>config add Xfrin/zones</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/name "<code class="option">example.com</code>"</code></strong>
+> <strong class="userinput"><code>config set Xfrin/zones[0]/master_addr "<code class="option">2001:db8::53</code>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
 
-    </p></div><p>
-       To manually trigger a zone transfer to retrieve a remote zone,
-       you may use the <span class="command"><strong>bindctl</strong></span> utility.
-       For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
+      (We assume there has been no zone configuration before).
+      </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438340"></a>Enabling IXFR</h2></div></div></div><p>
+        As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
+        zone transfers by default.  To enable IXFR for zone transfers
+        for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
+        configuration parameter to <strong class="userinput"><code>true</code></strong>.
+        In the above example of configuration sequence, you'll need
+        to add the following before performing <strong class="userinput"><code>commit</code></strong>:
+      </p><pre class="screen">> <strong class="userinput"><code>config set Xfrin/zones[0]/use_ixfr true</code></strong></pre><p>
+      </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+      One reason why IXFR is disabled by default in the current
+      release is because it does not support automatic fallback from IXFR to
+      AXFR when it encounters a primary server that doesn't support
+      outbound IXFR (and, not many existing implementations support
+      it).  Another, related reason is that it does not use AXFR even
+      if it has no knowledge about the zone (like at the very first
+      time the secondary server is set up).  IXFR requires the
+      "current version" of the zone, so obviously it doesn't work
+      in this situation and AXFR is the only workable choice.
+      The current release of <span class="command"><strong>b10-xfrin</strong></span> does not
+      make this selection automatically.
+      These features will be implemented in a near future
+      version, at which point we will enable IXFR by default.
+      </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438382"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+	To manually trigger a zone transfer to retrieve a remote zone,
+	you may use the <span class="command"><strong>bindctl</strong></span> utility.
+	For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
 
-       </p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
-    </p></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
+	</p><pre class="screen">> <strong class="userinput"><code>Xfrin retransfer zone_name="<code class="option">foo.example.org</code>" master=<code class="option">192.0.2.99</code></code></strong></pre><p>
+      </p></div></div><div class="chapter" title="Chapter 10. Outbound Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrout"></a>Chapter 10. Outbound Zone Transfers</h2></div></div></div><p>
       The <span class="command"><strong>b10-xfrout</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
       When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
-      receives an AXFR request, <span class="command"><strong>b10-xfrout</strong></span>
-      sends the zone.
-      This is used to provide master DNS service to share zones
+      receives an AXFR or IXFR request, <span class="command"><strong>b10-auth</strong></span>
+      internally forwards the request to <span class="command"><strong>b10-xfrout</strong></span>,
+      which handles the rest of request processing.
+      This is used to provide primary DNS service to share zones
       to secondary name servers.
       The <span class="command"><strong>b10-xfrout</strong></span> is also used to send
-      NOTIFY messages to slaves.
+      NOTIFY messages to secondary servers.
+    </p><p>
+      A global or per zone <code class="option">transfer_acl</code> configuration
+      can be used to control accessibility of the outbound zone
+      transfer service.
+      By default, <span class="command"><strong>b10-xfrout</strong></span> allows any clients to
+      perform zone transfers for any zones:
+    </p><pre class="screen">> <strong class="userinput"><code>config show Xfrout/transfer_acl</code></strong>
+Xfrout/transfer_acl[0]	{"action": "ACCEPT"}	any	(default)</pre><p>
+      You can change this to, for example, rejecting all transfer
+      requests by default while allowing requests for the transfer
+      of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+    </p><pre class="screen">> <strong class="userinput"><code>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</code></strong>
+> <strong class="userinput"><code>config add Xfrout/zone_config</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/origin "example.com"</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</code></strong>
+<strong class="userinput"><code>                                                 {"action": "ACCEPT", "from": "2001:db8::1"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+	In the above example the lines
+	for <code class="option">transfer_acl</code> were divided for
+	readability.  In the actual input it must be on a single line.
+    </p></div><p>
+      If you want to require TSIG in access control, a separate TSIG
+      "key ring" must be configured specifically
+      for <span class="command"><strong>b10-xfrout</strong></span> as well as a system wide
+      key ring, both containing a consistent set of keys.
+      For example, to change the previous example to allowing requests
+      from 192.0.2.1 signed by a TSIG with a key name of
+      "key.example", you'll need to do this:
+    </p><pre class="screen">> <strong class="userinput"><code>config set tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+      The first line of configuration defines a system wide key ring.
+      This is necessary because the <span class="command"><strong>b10-auth</strong></span> server
+      also checks TSIGs and it uses the system wide configuration.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.) 
-     Access control is not yet provided.
+	In a future version, <span class="command"><strong>b10-xfrout</strong></span> will also
+	use the system wide TSIG configuration.
+	The way to specify zone specific configuration (ACLs, etc) is
+	likely to be changed, too.
     </p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
       The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
@@ -607,21 +805,26 @@ This may be a temporary setting until then.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
      Access control (such as allowing notifies) is not yet provided.
      The primary/secondary service is not yet complete.
-    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168230286300">Forwarding</a></span></dt></dl></div><p>
+    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-resolver</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
 
     </p><p>
       The main <span class="command"><strong>bind10</strong></span> process can be configured
-      to select to run either the authoritative or resolver.
+      to select to run either the authoritative or resolver or both.
       By default, it starts the authoritative service.
 
 
       You may change this using <span class="command"><strong>bindctl</strong></span>, for example:
 
       </p><pre class="screen">
-> <strong class="userinput"><code>config set Boss/start_auth false</code></strong>
-> <strong class="userinput"><code>config set Boss/start_resolver true</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrin</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-auth</code></strong>
+> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
 > <strong class="userinput"><code>config commit</code></strong>
 </pre><p>
 
@@ -629,14 +832,52 @@ This may be a temporary setting until then.
        The master <span class="command"><strong>bind10</strong></span> will stop and start
        the desired services.
     </p><p>
-      The resolver also needs to be configured to listen on an address
-      and port:
+      By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+      The following example shows how it can be configured to
+      listen on an additional address (and port):
 
       </p><pre class="screen">
-> <strong class="userinput"><code>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</code></strong>
+> <strong class="userinput"><code>config add Resolver/listen_on</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/address "192.168.1.1"</code></strong>
+> <strong class="userinput"><code>config set Resolver/listen_on[<em class="replaceable"><code>2</code></em>]/port 53</code></strong>
 > <strong class="userinput"><code>config commit</code></strong>
 </pre><p>
-    </p><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168230286300"></a>Forwarding</h2></div></div></div><p>
+    </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+       as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+       Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438673"></a>Access Control</h2></div></div></div><p>
+        By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
+        DNS queries from the localhost (127.0.0.1 and ::1).
+        The <code class="option">Resolver/query_acl</code> configuration may
+	be used to reject, drop, or allow specific IPs or networks.
+        This configuration list is processed on a first-match basis.
+      </p><p>
+	The configuration's <code class="option">action</code> item may be
+	set to <span class="quote">“<span class="quote">ACCEPT</span>”</span> to allow the incoming query,
+	<span class="quote">“<span class="quote">REJECT</span>”</span> to respond with a DNS REFUSED return
+	code, or <span class="quote">“<span class="quote">DROP</span>”</span> to ignore the query without
+	any response (such as a blackhole).  For more information,
+	see the respective debugging messages:  <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_ACCEPTED" target="_top">RESOLVER_QUERY_ACCEPTED</a>,
+	<a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_REJECTED" target="_top">RESOLVER_QUERY_REJECTED</a>,
+	and <a class="ulink" href="bind10-messages.html#RESOLVER_QUERY_DROPPED" target="_top">RESOLVER_QUERY_DROPPED</a>.
+      </p><p>
+	The required configuration's <code class="option">from</code> item is set
+        to an IPv4 or IPv6 address, an address with a network mask, or to
+	the special lowercase keywords <span class="quote">“<span class="quote">any6</span>”</span> (for
+	any IPv6 address) or <span class="quote">“<span class="quote">any4</span>”</span> (for any IPv4
+	address).
+      </p><p>
+	For example to allow the <em class="replaceable"><code>192.168.1.0/24</code></em>
+	network to use your recursive name server, at the
+	<span class="command"><strong>bindctl</strong></span> prompt run:
+      </p><pre class="screen">
+> <strong class="userinput"><code>config add Resolver/query_acl</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/action "ACCEPT"</code></strong>
+> <strong class="userinput"><code>config set Resolver/query_acl[<em class="replaceable"><code>2</code></em>]/from "<em class="replaceable"><code>192.168.1.0/24</code></em>"</code></strong>
+> <strong class="userinput"><code>config commit</code></strong>
+</pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
+       as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
+       Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
+      syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438891"></a>Forwarding</h2></div></div></div><p>
 
         To enable forwarding, the upstream address and port must be
         configured to forward queries to, such as:
@@ -664,68 +905,440 @@ This may be a temporary setting until then.
 
     </p><p>
 
-       This stats daemon provides commands to identify if it is running,
-       show specified or all statistics data, set values, remove data,
-       and reset data.
+       This stats daemon provides commands to identify if it is
+       running, show specified or all statistics data, show specified
+       or all statistics data schema, and set specified statistics
+       data.
 
        For example, using <span class="command"><strong>bindctl</strong></span>:
 
        </p><pre class="screen">
 > <strong class="userinput"><code>Stats show</code></strong>
 {
-    "auth.queries.tcp": 1749,
-    "auth.queries.udp": 867868,
-    "bind10.boot_time": "2011-01-20T16:59:03Z",
-    "report_time": "2011-01-20T17:04:06Z",
-    "stats.boot_time": "2011-01-20T16:59:05Z",
-    "stats.last_update_time": "2011-01-20T17:04:05Z",
-    "stats.lname": "4d3869d9_a at jreed.example.net",
-    "stats.start_time": "2011-01-20T16:59:05Z",
-    "stats.timestamp": 1295543046.823504
+    "Auth": {
+        "queries.tcp": 1749,
+        "queries.udp": 867868
+    },
+    "Boss": {
+        "boot_time": "2011-01-20T16:59:03Z"
+    },
+    "Stats": {
+        "boot_time": "2011-01-20T16:59:05Z",
+        "last_update_time": "2011-01-20T17:04:05Z",
+        "lname": "4d3869d9_a at jreed.example.net",
+        "report_time": "2011-01-20T17:04:06Z",
+        "timestamp": 1295543046.823504
+    }
 }
        </pre><p>
-    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><p>
-        Each message written by BIND 10 to the configured logging destinations
-        comprises a number of components that identify the origin of the
-        message and, if the message indicates a problem, information about the
-        problem that may be useful in fixing it.
-    </p><p>
-        Consider the message below logged to a file:
-        </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439042"></a>Logging configuration</h2></div></div></div><p>
+
+	The logging system in BIND 10 is configured through the
+	Logging module. All BIND 10 modules will look at the
+	configuration in Logging to see what should be logged and
+	to where.
+
+
+
+      </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439052"></a>Loggers</h3></div></div></div><p>
+
+	  Within BIND 10, a message is logged through a component
+	  called a "logger". Different parts of BIND 10 log messages
+	  through different loggers, and each logger can be configured
+	  independently of one another.
+
+        </p><p>
+
+	  In the Logging module, you can specify the configuration
+	  for zero or more loggers; any that are not specified will
+	  take appropriate default values.
+
+        </p><p>
+
+	  The three most important elements of a logger configuration
+	  are the <code class="option">name</code> (the component that is
+	  generating the messages), the <code class="option">severity</code>
+	  (what to log), and the <code class="option">output_options</code>
+	  (where to log).
+
+        </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439077"></a>name (string)</h4></div></div></div><p>
+	  Each logger in the system has a name, the name being that
+	  of the component using it to log messages. For instance,
+	  if you want to configure logging for the resolver module,
+	  you add an entry for a logger named <span class="quote">“<span class="quote">Resolver</span>”</span>. This
+	  configuration will then be used by the loggers in the
+	  Resolver module, and all the libraries used by it.
+              </p><p>
+
+	  If you want to specify logging for one specific library
+	  within the module, you set the name to
+	  <em class="replaceable"><code>module.library</code></em>.  For example, the
+	  logger used by the nameserver address store component
+	  has the full name of <span class="quote">“<span class="quote">Resolver.nsas</span>”</span>. If
+	  there is no entry in Logging for a particular library,
+	  it will use the configuration given for the module.
+
+
+
+        </p><p>
+
+
+
+	  To illustrate this, suppose you want the cache library
+	  to log messages of severity DEBUG, and the rest of the
+	  resolver code to log messages of severity INFO. To achieve
+	  this you specify two loggers, one with the name
+	  <span class="quote">“<span class="quote">Resolver</span>”</span> and severity INFO, and one with
+	  the name <span class="quote">“<span class="quote">Resolver.cache</span>”</span> with severity
+	  DEBUG. As there are no entries for other libraries (e.g.
+	  the nsas), they will use the configuration for the module
+	  (<span class="quote">“<span class="quote">Resolver</span>”</span>), so giving the desired behavior.
+
+        </p><p>
+
+	  One special case is that of a module name of <span class="quote">“<span class="quote">*</span>”</span>
+	  (asterisk), which is interpreted as <span class="emphasis"><em>any</em></span>
+	  module. You can set global logging options by using this,
+	  including setting the logging configuration for a library
+	  that is used by multiple modules (e.g. <span class="quote">“<span class="quote">*.config</span>”</span>
+	  specifies the configuration library code in whatever
+	  module is using it).
+
+        </p><p>
+
+	  If there are multiple logger specifications in the
+	  configuration that might match a particular logger, the
+	  specification with the more specific logger name takes
+	  precedence. For example, if there are entries for
+	  both <span class="quote">“<span class="quote">*</span>”</span> and <span class="quote">“<span class="quote">Resolver</span>”</span>, the
+	  resolver module — and all libraries it uses —
+	  will log messages according to the configuration in the
+	  second entry (<span class="quote">“<span class="quote">Resolver</span>”</span>). All other modules
+	  will use the configuration of the first entry
+	  (<span class="quote">“<span class="quote">*</span>”</span>). If there was also a configuration
+	  entry for <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, the cache library
+	  within the resolver would use that in preference to the
+	  entry for <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+        </p><p>
+
+	  One final note about the naming. When specifying the
+	  module name within a logger, use the name of the module
+	  as specified in <span class="command"><strong>bindctl</strong></span>, e.g.
+	  <span class="quote">“<span class="quote">Resolver</span>”</span> for the resolver module,
+	  <span class="quote">“<span class="quote">Xfrout</span>”</span> for the xfrout module, etc. When
+	  the message is logged, the message will include the name
+	  of the logger generating the message, but with the module
+	  name replaced by the name of the process implementing
+	  the module (so for example, a message generated by the
+	  <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
+	  with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
+
+        </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439176"></a>severity (string)</h4></div></div></div><p>
+
+          This specifies the category of messages logged.
+	  Each message is logged with an associated severity which
+	  may be one of the following (in descending order of
+	  severity):
+        </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> FATAL </li><li class="listitem"> ERROR </li><li class="listitem"> WARN </li><li class="listitem"> INFO </li><li class="listitem"> DEBUG </li></ul></div><p>
+
+	  When the severity of a logger is set to one of these
+	  values, it will only log messages of that severity, and
+	  the severities above it. The severity may also be set to
+	  NONE, in which case all messages from that logger are
+	  inhibited.
+
+
+
+        </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439227"></a>output_options (list)</h4></div></div></div><p>
+
+	  Each logger can have zero or more
+	  <code class="option">output_options</code>. These specify where log
+	  messages are sent to. These are explained in detail below.
+
+        </p><p>
+
+          The other options for a logger are:
+
+        </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439243"></a>debuglevel (integer)</h4></div></div></div><p>
+
+	  When a logger's severity is set to DEBUG, this value
+	  specifies what debug messages should be printed. It ranges
+	  from 0 (least verbose) to 99 (most verbose).
+        </p><p>
+
+          If severity for the logger is not DEBUG, this value is ignored.
+
+        </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439258"></a>additive (true or false)</h4></div></div></div><p>
+
+	  If this is true, the <code class="option">output_options</code> from
+	  the parent will be used. For example, if there are two
+	  loggers configured; <span class="quote">“<span class="quote">Resolver</span>”</span> and
+	  <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, and <code class="option">additive</code>
+	  is true in the second, it will write the log messages
+	  not only to the destinations specified for
+	  <span class="quote">“<span class="quote">Resolver.cache</span>”</span>, but also to the destinations
+	  as specified in the <code class="option">output_options</code> in
+	  the logger named <span class="quote">“<span class="quote">Resolver</span>”</span>.
+
+
+
+      </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439294"></a>Output Options</h3></div></div></div><p>
+
+	  The main settings for an output option are the
+	  <code class="option">destination</code> and a value called
+	  <code class="option">output</code>, the meaning of which depends on
+	  the destination that is set.
+
+        </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439309"></a>destination (string)</h4></div></div></div><p>
+
+            The destination is the type of output. It can be one of:
+
+          </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439341"></a>output (string)</h4></div></div></div><p>
+
+	  Depending on what is set as the output destination, this
+	  value is interpreted as follows:
+
+        </p><div class="variablelist"><dl><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">console</span>”</span></span></dt><dd>
+		 The value of output must be one of <span class="quote">“<span class="quote">stdout</span>”</span>
+		 (messages printed to standard output) or
+		 <span class="quote">“<span class="quote">stderr</span>”</span> (messages printed to standard
+		 error).
+              </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span></span></dt><dd>
+		The value of output is interpreted as a file name;
+		log messages will be appended to this file.
+              </dd><dt><span class="term"><code class="option">destination</code> is <span class="quote">“<span class="quote">syslog</span>”</span></span></dt><dd>
+		The value of output is interpreted as the
+		<span class="command"><strong>syslog</strong></span> facility (e.g.
+		<span class="emphasis"><em>local0</em></span>) that should be used
+		for log messages.
+              </dd></dl></div><p>
+
+          The other options for <code class="option">output_options</code> are:
+
+        </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439427"></a>flush (true of false)</h5></div></div></div><p>
+	    Flush buffers after each log message. Doing this will
+	    reduce performance but will ensure that if the program
+	    terminates abnormally, all messages up to the point of
+	    termination are output.
+          </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439436"></a>maxsize (integer)</h5></div></div></div><p>
+	    Only relevant when destination is file, this is the maximum
+	    file size of output files in bytes. When the maximum
+	    size is reached, the file is renamed and a new file opened.
+	    (For example, a ".1" is appended to the name —
+	    if a ".1" file exists, it is renamed ".2",
+            etc.)
+          </p><p>
+            If this is 0, no maximum file size is used.
+          </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439449"></a>maxver (integer)</h5></div></div></div><p>
+	    Maximum number of old log files to keep around when
+	    rolling the output file. Only relevant when
+	    <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
+          </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439468"></a>Example session</h3></div></div></div><p>
+
+	  In this example we want to set the global logging to
+	  write to the file <code class="filename">/var/log/my_bind10.log</code>,
+	  at severity WARN. We want the authoritative server to
+	  log at DEBUG with debuglevel 40, to a different file
+	  (<code class="filename">/tmp/debug_messages</code>).
+
+        </p><p>
+
+          Start <span class="command"><strong>bindctl</strong></span>.
+
+        </p><p>
+
+           </p><pre class="screen">["login success "]
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers	[]	list
+</pre><p>
+
+        </p><p>
+
+	  By default, no specific loggers are configured, in which
+	  case the severity defaults to INFO and the output is
+	  written to stderr.
+
+        </p><p>
+
+          Let's first add a default logger:
+
+        </p><p>
+
+          </p><pre class="screen"><strong class="userinput"><code>> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code>config show Logging</code></strong>
+Logging/loggers/	list	(modified)
+</pre><p>
+
+        </p><p>
+
+	  The loggers value line changed to indicate that it is no
+	  longer an empty list:
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name	""	string	(default)
+Logging/loggers[0]/severity	"INFO"	string	(default)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options	[]	list	(default)
+</pre><p>
+
+        </p><p>
+
+	  The name is mandatory, so we must set it. We will also
+	  change the severity as well. Let's start with the global
+	  logger.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code>config set Logging/loggers[0]/name *</code></strong>
+> <strong class="userinput"><code>config set Logging/loggers[0]/severity WARN</code></strong>
+> <strong class="userinput"><code>config show Logging/loggers</code></strong>
+Logging/loggers[0]/name	"*"	string	(modified)
+Logging/loggers[0]/severity	"WARN"	string	(modified)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options	[]	list	(default)
+</pre><p>
+
+        </p><p>
+
+	  Of course, we need to specify where we want the log
+	  messages to go, so we add an entry for an output option.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers[0]/output_options</code></strong>
+> <strong class="userinput"><code> config show Logging/loggers[0]/output_options</code></strong>
+Logging/loggers[0]/output_options[0]/destination	"console"	string	(default)
+Logging/loggers[0]/output_options[0]/output	"stdout"	string	(default)
+Logging/loggers[0]/output_options[0]/flush	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/maxsize	0	integer	(default)
+Logging/loggers[0]/output_options[0]/maxver	0	integer	(default)
+</pre><p>
+
+
+        </p><p>
+
+          These aren't the values we are looking for.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxsize 30000</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[0]/output_options[0]/maxver 8</code></strong>
+</pre><p>
+
+        </p><p>
+
+	  Which would make the entire configuration for this logger
+	  look like:
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config show all Logging/loggers</code></strong>
+Logging/loggers[0]/name	"*"	string	(modified)
+Logging/loggers[0]/severity	"WARN"	string	(modified)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/destination	"file"	string	(modified)
+Logging/loggers[0]/output_options[0]/output	"/var/log/bind10.log"	string	(modified)
+Logging/loggers[0]/output_options[0]/flush	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/maxsize	30000	integer	(modified)
+Logging/loggers[0]/output_options[0]/maxver	8	integer	(modified)
+</pre><p>
+
+        </p><p>
+
+	  That looks OK, so let's commit it before we add the
+	  configuration for the authoritative server's logger.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config commit</code></strong></pre><p>
+
+        </p><p>
+
+	  Now that we have set it, and checked each value along
+	  the way, adding a second entry is quite similar.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config add Logging/loggers</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/name Auth</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/severity DEBUG</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/debuglevel 40</code></strong>
+> <strong class="userinput"><code> config add Logging/loggers[1]/output_options</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/destination file</code></strong>
+> <strong class="userinput"><code> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+        </p><p>
+
+	  And that's it. Once we have found whatever it was we
+	  needed the debug messages for, we can simply remove the
+	  second logger to let the authoritative server use the
+	  same settings as the rest.
+
+        </p><p>
+
+          </p><pre class="screen">> <strong class="userinput"><code> config remove Logging/loggers[1]</code></strong>
+> <strong class="userinput"><code> config commit</code></strong>
+</pre><p>
+
+        </p><p>
+
+	  And every module will now be using the values from the
+	  logger named <span class="quote">“<span class="quote">*</span>”</span>.
+
+        </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229440023"></a>Logging Message Format</h2></div></div></div><p>
+	  Each message written by BIND 10 to the configured logging
+	  destinations comprises a number of components that identify
+	  the origin of the message and, if the message indicates
+	  a problem, information about the problem that may be
+	  useful in fixing it.
+      </p><p>
+          Consider the message below logged to a file:
+          </p><pre class="screen">2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
     ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</pre><p>
-    </p><p>
-      Note: the layout of messages written to the system logging
-      file (syslog) may be slightly different.  This message has
-      been split across two lines here for display reasons; in the
-      logging file, it will appear on one line.)
-    </p><p>
-      The log message comprises a number of components:
-
-        </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
-            The date and time at which the message was generated.
-        </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
-            The severity of the message.
-        </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
-	    The source of the message.  This comprises two components:
-	    the BIND 10 process generating the message (in this
-	    case, <span class="command"><strong>b10-resolver</strong></span>) and the module
-	    within the program from which the message originated
-	    (which in the example is the asynchronous I/O link
-	    module, asiolink).
-        </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
+      </p><p>
+        Note: the layout of messages written to the system logging
+        file (syslog) may be slightly different.  This message has
+        been split across two lines here for display reasons; in the
+        logging file, it will appear on one line.
+      </p><p>
+        The log message comprises a number of components:
+
+          </p><div class="variablelist"><dl><dt><span class="term">2011-06-15 13:48:22.034</span></dt><dd><p>
+              The date and time at which the message was generated.
+          </p></dd><dt><span class="term">ERROR</span></dt><dd><p>
+              The severity of the message.
+          </p></dd><dt><span class="term">[b10-resolver.asiolink]</span></dt><dd><p>
+            The source of the message.  This comprises two components:
+            the BIND 10 process generating the message (in this
+            case, <span class="command"><strong>b10-resolver</strong></span>) and the module
+            within the program from which the message originated
+            (which in the example is the asynchronous I/O link
+            module, asiolink).
+          </p></dd><dt><span class="term">ASIODNS_OPENSOCK</span></dt><dd><p>
 	    The message identification.  Every message in BIND 10
 	    has a unique identification, which can be used as an
 	    index into the <a class="ulink" href="bind10-messages.html" target="_top"><em class="citetitle">BIND 10 Messages
 	    Manual</em></a> (<a class="ulink" href="http://bind10.isc.org/docs/bind10-messages.html" target="_top">http://bind10.isc.org/docs/bind10-messages.html</a>) from which more information can be obtained.
-        </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
-            A brief description of the cause of the problem.  Within this text,
-            information relating to the condition that caused the message to
-            be logged will be included.  In this example, error number 111
-            (an operating system-specific error number) was encountered when
-            trying to open a TCP connection to port 53 on the local system
-            (address 127.0.0.1).  The next step would be to find out the reason
-            for the failure by consulting your system's documentation to
-            identify what error number 111 means.
-        </p></dd></dl></div><p>
-
-    </p></div></div></body></html>
+          </p></dd><dt><span class="term">error 111 opening TCP socket to 127.0.0.1(53)</span></dt><dd><p>
+	      A brief description of the cause of the problem.
+	      Within this text, information relating to the condition
+	      that caused the message to be logged will be included.
+	      In this example, error number 111 (an operating
+	      system-specific error number) was encountered when
+	      trying to open a TCP connection to port 53 on the
+	      local system (address 127.0.0.1).  The next step
+	      would be to find out the reason for the failure by
+	      consulting your system's documentation to identify
+	      what error number 111 means.
+          </p></dd></dl></div><p>
+      </p></div></div></div></body></html>
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
new file mode 100644
index 0000000..9c8ffbe
--- /dev/null
+++ b/doc/guide/bind10-guide.txt
@@ -0,0 +1,1360 @@
+                                 BIND 10 Guide
+
+Administrator Reference for BIND 10
+
+   This is the reference guide for BIND 10 version 20111021.
+
+   Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
+
+   Abstract
+
+   BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems
+   Consortium (ISC). It includes DNS libraries and modular components for
+   controlling authoritative and recursive DNS servers.
+
+   This is the reference guide for BIND 10 version 20111021. The most
+   up-to-date version of this document (in PDF, HTML, and plain text
+   formats), along with other documents for BIND 10, can be found at
+   http://bind10.isc.org/docs.
+
+   --------------------------------------------------------------------------
+
+   Table of Contents
+
+   1. Introduction
+
+                Supported Platforms
+
+                Required Software
+
+                Starting and Stopping the Server
+
+                Managing BIND 10
+
+   2. Installation
+
+                Building Requirements
+
+                Quick start
+
+                Installation from source
+
+                             Download Tar File
+
+                             Retrieve from Git
+
+                             Configure before the build
+
+                             Build
+
+                             Install
+
+                             Install Hierarchy
+
+   3. Starting BIND10 with bind10
+
+                Starting BIND 10
+
+                Configuration of started processes
+
+   4. Command channel
+
+   5. Configuration manager
+
+   6. Remote control daemon
+
+                Configuration specification for b10-cmdctl
+
+   7. Control and configure user interface
+
+   8. Authoritative Server
+
+                Server Configurations
+
+                Data Source Backends
+
+                Loading Master Zones Files
+
+   9. Incoming Zone Transfers
+
+                Configuration for Incoming Zone Transfers
+
+                Enabling IXFR
+
+                Trigger an Incoming Zone Transfer Manually
+
+   10. Outbound Zone Transfers
+
+   11. Secondary Manager
+
+   12. Recursive Name Server
+
+                Access Control
+
+                Forwarding
+
+   13. Statistics
+
+   14. Logging
+
+                Logging configuration
+
+                             Loggers
+
+                             Output Options
+
+                             Example session
+
+                Logging Message Format
+
+   List of Tables
+
+   3.1.
+
+Chapter 1. Introduction
+
+   Table of Contents
+
+   Supported Platforms
+
+   Required Software
+
+   Starting and Stopping the Server
+
+   Managing BIND 10
+
+   BIND is the popular implementation of a DNS server, developer interfaces,
+   and DNS tools. BIND 10 is a rewrite of BIND 9. BIND 10 is written in C++
+   and Python and provides a modular environment for serving and maintaining
+   DNS.
+
+  Note
+
+   This guide covers the experimental prototype of BIND 10 version 20111021.
+
+  Note
+
+   BIND 10 provides a EDNS0- and DNSSEC-capable authoritative DNS server and
+   a caching recursive name server which also provides forwarding.
+
+Supported Platforms
+
+   BIND 10 builds have been tested on Debian GNU/Linux 5, Ubuntu 9.10, NetBSD
+   5, Solaris 10, FreeBSD 7 and 8, and CentOS Linux 5.3. It has been tested
+   on Sparc, i386, and amd64 hardware platforms. It is planned for BIND 10 to
+   build, install and run on Windows and standard Unix-type platforms.
+
+Required Software
+
+   BIND 10 requires Python 3.1. Later versions may work, but Python 3.1 is
+   the minimum version which will work.
+
+   BIND 10 uses the Botan crypto library for C++. It requires at least Botan
+   version 1.8.
+
+   BIND 10 uses the log4cplus C++ logging library. It requires at least
+   log4cplus version 1.0.3.
+
+   The authoritative server requires SQLite 3.3.9 or newer. The b10-xfrin,
+   b10-xfrout, and b10-zonemgr modules require the libpython3 library and the
+   Python _sqlite3.so module.
+
+  Note
+
+   Some operating systems do not provide these dependencies in their default
+   installation nor standard packages collections. You may need to install
+   them separately.
+
+Starting and Stopping the Server
+
+   BIND 10 is modular. Part of this modularity is accomplished using multiple
+   cooperating processes which, together, provide the server functionality.
+   This is a change from the previous generation of BIND software, which used
+   a single process.
+
+   At first, running many different processes may seem confusing. However,
+   these processes are started, stopped, and maintained by a single command,
+   bind10. This command starts a master process which will start other
+   processes as needed. The processes started by the bind10 command have
+   names starting with "b10-", including:
+
+     o b10-msgq -- Message bus daemon. This process coordinates communication
+       between all of the other BIND 10 processes.
+     o b10-auth -- Authoritative DNS server. This process serves DNS
+       requests.
+     o b10-cfgmgr -- Configuration manager. This process maintains all of the
+       configuration for BIND 10.
+     o b10-cmdctl -- Command and control service. This process allows
+       external control of the BIND 10 system.
+     o b10-resolver -- Recursive name server. This process handles incoming
+       queries.
+     o b10-stats -- Statistics collection daemon. This process collects and
+       reports statistics data.
+     o b10-xfrin -- Incoming zone transfer service. This process is used to
+       transfer a new copy of a zone into BIND 10, when acting as a secondary
+       server.
+     o b10-xfrout -- Outgoing zone transfer service. This process is used to
+       handle transfer requests to send a local zone to a remote secondary
+       server, when acting as a master server.
+     o b10-zonemgr -- Secondary manager. This process keeps track of timers
+       and other necessary information for BIND 10 to act as a slave server.
+
+   These are run automatically by bind10 and do not need to be run manually.
+
+Managing BIND 10
+
+   Once BIND 10 is running, a few commands are used to interact directly with
+   the system:
+
+     o bindctl -- interactive administration interface. This is a
+       command-line tool which allows an administrator to control BIND 10.
+     o b10-loadzone -- zone file loader. This tool will load standard
+       masterfile-format zone files into BIND 10.
+     o b10-cmdctl-usermgr -- user access control. This tool allows an
+       administrator to authorize additional users to manage BIND 10.
+
+   The tools and modules are covered in full detail in this guide. In
+   addition, manual pages are also provided in the default installation.
+
+   BIND 10 also provides libraries and programmer interfaces for C++ and
+   Python for the message bus, configuration backend, and, of course, DNS.
+   These include detailed developer documentation and code examples.
+
+Chapter 2. Installation
+
+   Table of Contents
+
+   Building Requirements
+
+   Quick start
+
+   Installation from source
+
+                Download Tar File
+
+                Retrieve from Git
+
+                Configure before the build
+
+                Build
+
+                Install
+
+                Install Hierarchy
+
+Building Requirements
+
+   In addition to the run-time requirements, building BIND 10 from source
+   code requires various development include headers.
+
+  Note
+
+   Some operating systems have split their distribution packages into a
+   run-time and a development package. You will need to install the
+   development package versions, which include header files and libraries, to
+   build BIND 10 from source code.
+
+   Building from source code requires the Boost build-time headers. At least
+   Boost version 1.35 is required.
+
+   To build BIND 10, also install the Botan (at least version 1.8) and the
+   log4cplus (at least version 1.0.3) development include headers.
+
+   The Python Library and Python _sqlite3 module are required to enable the
+   Xfrout and Xfrin support.
+
+  Note
+
+   The Python related libraries and modules need to be built for Python 3.1.
+
+   Building BIND 10 also requires a C++ compiler and standard development
+   headers, make, and pkg-config. BIND 10 builds have been tested with GCC
+   g++ 3.4.3, 4.1.2, 4.1.3, 4.2.1, 4.3.2, and 4.4.1; Clang++ 2.8; and Sun C++
+   5.10.
+
+Quick start
+
+  Note
+
+   This quickly covers the standard steps for installing and deploying BIND
+   10 as an authoritative name server using its defaults. For
+   troubleshooting, full customizations and further details, see the
+   respective chapters in the BIND 10 guide.
+
+   To quickly get started with BIND 10, follow these steps.
+
+    1. Install required build dependencies.
+    2. Download the BIND 10 source tar file from
+       ftp://ftp.isc.org/isc/bind10/.
+    3. Extract the tar file:
+
+ $ gzcat bind10-VERSION.tar.gz | tar -xvf -
+
+    4. Go into the source and run configure:
+
+ $ cd bind10-VERSION
+   $ ./configure
+
+    5. Build it:
+
+ $ make
+
+    6. Install it (to default /usr/local):
+
+ $ make install
+
+    7. Start the server:
+
+ $ /usr/local/sbin/bind10
+
+    8. Test it; for example:
+
+ $ dig @127.0.0.1 -c CH -t TXT authors.bind
+
+    9. Load desired zone file(s), for example:
+
+ $ b10-loadzone your.zone.example.org
+
+   10. Test the new zone.
+
+Installation from source
+
+   BIND 10 is open source software written in C++ and Python. It is freely
+   available in source code form from ISC via the Git code revision control
+   system or as a downloadable tar file. It may also be available in
+   pre-compiled ready-to-use packages from operating system vendors.
+
+  Download Tar File
+
+   Downloading a release tar file is the recommended method to obtain the
+   source code.
+
+   The BIND 10 releases are available as tar file downloads from
+   ftp://ftp.isc.org/isc/bind10/. Periodic development snapshots may also be
+   available.
+
+  Retrieve from Git
+
+   Downloading this "bleeding edge" code is recommended only for developers
+   or advanced users. Using development code in a production environment is
+   not recommended.
+
+  Note
+
+   When using source code retrieved via Git additional software will be
+   required: automake (v1.11 or newer), libtoolize, and autoconf (2.59 or
+   newer). These may need to be installed.
+
+   The latest development code, including temporary experiments and
+   un-reviewed code, is available via the BIND 10 code revision control
+   system. This is powered by Git and all the BIND 10 development is public.
+   The leading development is done in the "master".
+
+   The code can be checked out from git://bind10.isc.org/bind10; for example:
+
+ $ git clone git://bind10.isc.org/bind10
+
+   When checking out the code from the code version control system, it
+   doesn't include the generated configure script, Makefile.in files, nor the
+   related configure files. They can be created by running autoreconf with
+   the --install switch. This will run autoconf, aclocal, libtoolize,
+   autoheader, automake, and related commands.
+
+  Configure before the build
+
+   BIND 10 uses the GNU Build System to discover build environment details.
+   To generate the makefiles using the defaults, simply run:
+
+ $ ./configure
+
+   Run ./configure with the --help switch to view the different options. The
+   commonly-used options are:
+
+   --prefix
+           Define the installation location (the default is /usr/local/).
+
+   --with-boost-include
+           Define the path to find the Boost headers.
+
+   --with-pythonpath
+           Define the path to Python 3.1 if it is not in the standard
+           execution path.
+
+   --with-gtest
+           Enable building the C++ Unit Tests using the Google Tests
+           framework. Optionally this can define the path to the gtest header
+           files and library.
+
+   For example, the following configures it to find the Boost headers, find
+   the Python interpreter, and sets the installation location:
+
+ $ ./configure \
+       --with-boost-include=/usr/pkg/include \
+       --with-pythonpath=/usr/pkg/bin/python3.1 \
+       --prefix=/opt/bind10
+
+   If the configure fails, it may be due to missing or old dependencies.
+
+  Build
+
+   After the configure step is complete, to build the executables from the
+   C++ code and prepare the Python scripts, run:
+
+ $ make
+
+  Install
+
+   To install the BIND 10 executables, support files, and documentation, run:
+
+ $ make install
+
+  Note
+
+   The install step may require superuser privileges.
+
+  Install Hierarchy
+
+   The following is the layout of the complete BIND 10 installation:
+
+     o bin/ -- general tools and diagnostic clients.
+     o etc/bind10-devel/ -- configuration files.
+     o lib/ -- libraries and python modules.
+     o libexec/bind10-devel/ -- executables that a user wouldn't normally run
+       directly and are not run independently. These are the BIND 10 modules
+       which are daemons started by the bind10 tool.
+     o sbin/ -- commands used by the system administrator.
+     o share/bind10-devel/ -- configuration specifications.
+     o share/man/ -- manual pages (online documentation).
+     o var/bind10-devel/ -- data source and configuration databases.
+
+Chapter 3. Starting BIND10 with bind10
+
+   Table of Contents
+
+   Starting BIND 10
+
+   Configuration of started processes
+
+   BIND 10 provides the bind10 command which starts up the required
+   processes. bind10 will also restart some processes that exit unexpectedly.
+   This is the only command needed to start the BIND 10 system.
+
+   After starting the b10-msgq communications channel, bind10 connects to it,
+   runs the configuration manager, and reads its own configuration. Then it
+   starts the other modules.
+
+   The b10-sockcreator, b10-msgq and b10-cfgmgr services make up the core.
+   The b10-msgq daemon provides the communication channel between every part
+   of the system. The b10-cfgmgr daemon is always needed by every module, if
+   only to send information about themselves somewhere, but more importantly
+   to ask about their own settings, and about other modules. The
+   b10-sockcreator will allocate sockets for the rest of the system.
+
+   In its default configuration, the bind10 master process will also start up
+   b10-cmdctl for admins to communicate with the system, b10-auth for
+   authoritative DNS service, b10-stats for statistics collection, b10-xfrin
+   for inbound DNS zone transfers, b10-xfrout for outbound DNS zone
+   transfers, and b10-zonemgr for secondary service.
+
+Starting BIND 10
+
+   To start the BIND 10 service, simply run bind10. Run it with the --verbose
+   switch to get additional debugging or diagnostic output.
+
+  Note
+
+   If the setproctitle Python module is detected at start up, the process
+   names for the Python-based daemons will be renamed to better identify them
+   instead of just "python". This is not needed on some operating systems.
+
+Configuration of started processes
+
+   The processes to be started can be configured, with the exception of the
+   b10-sockcreator, b10-msgq and b10-cfgmgr.
+
+   The configuration is in the Boss/components section. Each element
+   represents one component, which is an abstraction of a process (currently
+   there's also one component which doesn't represent a process). If you
+   didn't want to transfer out at all (your server is a slave only), you
+   would just remove the corresponding component from the set, like this and
+   the process would be stopped immediately (and not started on the next
+   startup):
+
+ > config remove Boss/components b10-xfrout
+ > config commit
+
+   To add a process to the set, let's say the resolver (which is not started by
+   default), you would do this:
+
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+   Now, what it means. We add an entry called b10-resolver. It is both a name
+   used to reference this component in the configuration and the name of the
+   process to start. Then we set some parameters on how to start it.
+
+   The special one is for components that need some kind of special care
+   during startup or shutdown. Unless specified, the component is started in
+   the usual way. This is the list of components that need to be started in a
+   special way, with the value of special used for them:
+
+   Table 3.1.
+
+   +------------------------------------------------------------------------+
+   | Component    | Special  | Description                                  |
+   |--------------+----------+----------------------------------------------|
+   | b10-auth     | auth     | Authoritative server                         |
+   |--------------+----------+----------------------------------------------|
+   | b10-resolver | resolver | The resolver                                 |
+   |--------------+----------+----------------------------------------------|
+   | b10-cmdctl   | cmdctl   | The command control (remote control          |
+   |              |          | interface)                                   |
+   |--------------+----------+----------------------------------------------|
+   | setuid       | setuid   | Virtual component, see below                 |
+   +------------------------------------------------------------------------+
+
+   The kind specifies how a failure of the component should be handled. If it
+   is set to "dispensable" (the default unless you set something else), it
+   will get started again if it fails. If it is set to "needed" and it fails
+   at startup, the whole bind10 shuts down and exits with error exit code.
+   But if it fails some time later, it is just started again. If you set it
+   to "core", you indicate that the system is not usable without the
+   component and if such component fails, the system shuts down no matter
+   when the failure happened. This is the behaviour of the core components
+   (the ones you can't turn off), but you can declare any other components as
+   core as well if you wish (but you can turn these off, they just can't
+   fail).
+
+   The priority defines order in which the components should start. The ones
+   with higher numbers are started sooner than the ones with lower numbers. If
+   you don't set it, 0 (zero) is used as the priority.
+
+   There are other parameters we didn't use in our example. One of them is
+   "address". It is the address used by the component on the b10-msgq message
+   bus. The special components already know their address, but the usual ones
+   don't. The address is by convention the thing after b10-, with the first
+   letter capital (eg. b10-stats would have "Stats" as its address).
+
+   The last one is process. It is the name of the process to be started. It
+   defaults to the name of the component if not set, but you can use this to
+   override it.
+
+  Note
+
+   This system allows you to start the same component multiple times (by
+   including it in the configuration with different names, but the same
+   process setting). However, the rest of the system doesn't expect such
+   situation, so it would probably not do what you want. Such support is yet
+   to be implemented.
+
+  Note
+
+   The configuration is quite powerful, but that includes a lot of space for
+   mistakes. You could turn off the b10-cmdctl, but then you couldn't change
+   it back the usual way, as it would require it to be running (you would
+   have to find and edit the configuration directly). Also, some modules
+   might have dependencies -- b10-stats-httpd needs b10-stats, b10-xfrout
+   needs the b10-auth to be running, etc.
+
+   In short, you should think twice before disabling something here.
+
+   Now, to the mysterious setuid virtual component. If you use the -u option
+   to start the bind10 as root, but change the user later, we need to start
+   the b10-auth or b10-resolver as root (until the socket creator is
+   finished). So we need to specify the time when the switch from root to the
+   given user happens and that's what the setuid component is for. The switch
+   is done at the time the setuid component would be started, if it was a
+   process. The default configuration contains the setuid component with
+   priority 5, b10-auth has 10 to be started before the switch and everything
+   else is without priority, so it is started after the switch.
+
+Chapter 4. Command channel
+
+   The BIND 10 components use the b10-msgq message routing daemon to
+   communicate with other BIND 10 components. The b10-msgq implements what is
+   called the "Command Channel". Processes intercommunicate by sending
+   messages on the command channel. Example messages include shutdown, get
+   configurations, and set configurations. This Command Channel is not used
+   for DNS message passing. It is used only to control and monitor the BIND
+   10 system.
+
+   Administrators do not communicate directly with the b10-msgq daemon. By
+   default, BIND 10 uses port 9912 for the b10-msgq service. It listens on
+   127.0.0.1.
+
+Chapter 5. Configuration manager
+
+   The configuration manager, b10-cfgmgr, handles all BIND 10 system
+   configuration. It provides persistent storage for configuration, and
+   notifies running modules of configuration changes.
+
+   The b10-auth and b10-xfrin daemons and other components receive their
+   configurations from the configuration manager over the b10-msgq command
+   channel.
+
+   The administrator doesn't connect to it directly, but uses a user
+   interface to communicate with the configuration manager via b10-cmdctl's
+   REST-ful interface. b10-cmdctl is covered in Chapter 6, Remote control
+   daemon.
+
+  Note
+
+   The development prototype release only provides the bindctl as a user
+   interface to b10-cmdctl. Upcoming releases will provide another
+   interactive command-line interface and a web-based interface.
+
+   The b10-cfgmgr daemon can send all specifications and all current settings
+   to the bindctl client (via b10-cmdctl).
+
+   b10-cfgmgr relays configurations received from b10-cmdctl to the
+   appropriate modules.
+
+   The stored configuration file is at
+   /usr/local/var/bind10-devel/b10-config.db. (The full path is what was
+   defined at build configure time for --localstatedir. The default is
+   /usr/local/var/.) The format is loosely based on JSON and is directly
+   parseable python, but this may change in a future version. This
+   configuration data file is not manually edited by the administrator.
+
+   The configuration manager does not have any command line arguments.
+   Normally it is not started manually, but is automatically started using
+   the bind10 master process (as covered in Chapter 3, Starting BIND10 with
+   bind10).
+
+Chapter 6. Remote control daemon
+
+   Table of Contents
+
+   Configuration specification for b10-cmdctl
+
+   b10-cmdctl is the gateway between administrators and the BIND 10 system.
+   It is a HTTPS server that uses standard HTTP Digest Authentication for
+   username and password validation. It provides a REST-ful interface for
+   accessing and controlling BIND 10.
+
+   When b10-cmdctl starts, it first asks b10-cfgmgr about what modules are
+   running and what their configuration is (over the b10-msgq channel). Then
+   it will start listening on HTTPS for clients -- the user interface -- such
+   as bindctl.
+
+   b10-cmdctl directly sends commands (received from the user interface) to
+   the specified component. Configuration changes are actually commands to
+   b10-cfgmgr so are sent there.
+
+   The HTTPS server requires a private key, such as a RSA PRIVATE KEY. The
+   default location is at /usr/local/etc/bind10-devel/cmdctl-keyfile.pem. (A
+   sample key is at /usr/local/share/bind10-devel/cmdctl-keyfile.pem.) It
+   also uses a certificate located at
+   /usr/local/etc/bind10-devel/cmdctl-certfile.pem. (A sample certificate is
+   at /usr/local/share/bind10-devel/cmdctl-certfile.pem.) This may be a
+   self-signed certificate or purchased from a certification authority.
+
+  Note
+
+   The HTTPS server doesn't support a certificate request from a client (at
+   this time). The b10-cmdctl daemon does not provide a public service. If
+   any client wants to control BIND 10, then a certificate needs to be first
+   received from the BIND 10 administrator. The BIND 10 installation provides
+   a sample PEM bundle that matches the sample key and certificate.
+
+   The b10-cmdctl daemon also requires the user account file located at
+   /usr/local/etc/bind10-devel/cmdctl-accounts.csv. This comma-delimited file
+   lists the accounts with a user name, hashed password, and salt. (A sample
+   file is at /usr/local/share/bind10-devel/cmdctl-accounts.csv. It contains
+   the user named "root" with the password "bind10".)
+
+   The administrator may create a user account with the b10-cmdctl-usermgr
+   tool.
+
+   By default the HTTPS server listens on the localhost port 8080. The port
+   can be set by using the --port command line option. The address to listen
+   on can be set using the --address command line argument. Each HTTPS
+   connection is stateless and times out in 1200 seconds by default. This can
+   be redefined by using the --idle-timeout command line argument.
+
+Configuration specification for b10-cmdctl
+
+   The configuration items for b10-cmdctl are: key_file cert_file
+   accounts_file
+
+   The control commands are: print_settings shutdown
+
+Chapter 7. Control and configure user interface
+
+  Note
+
+   For this development prototype release, bindctl is the only user
+   interface. It is expected that upcoming releases will provide another
+   interactive command-line interface and a web-based interface for
+   controlling and configuring BIND 10.
+
+   The bindctl tool provides an interactive prompt for configuring,
+   controlling, and querying the BIND 10 components. It communicates directly
+   with a REST-ful interface over HTTPS provided by b10-cmdctl. It doesn't
+   communicate to any other components directly.
+
+   Configuration changes are actually commands to b10-cfgmgr. So when bindctl
+   sends a configuration, it is sent to b10-cmdctl (over a HTTPS connection);
+   then b10-cmdctl sends the command (over a b10-msgq command channel) to
+   b10-cfgmgr which then stores the details and relays (over a b10-msgq
+   command channel) the configuration on to the specified module.
+
+Chapter 8. Authoritative Server
+
+   Table of Contents
+
+   Server Configurations
+
+   Data Source Backends
+
+   Loading Master Zones Files
+
+   The b10-auth is the authoritative DNS server. It supports EDNS0 and
+   DNSSEC. It supports IPv6. Normally it is started by the bind10 master
+   process.
+
+Server Configurations
+
+   b10-auth is configured via the b10-cfgmgr configuration manager. The
+   module name is "Auth". The configuration data item is:
+
+   database_file
+           This is an optional string to define the path to find the SQLite3
+           database file. Note: Later the DNS server will use various data
+           source backends. This may be a temporary setting until then.
+
+   The configuration command is:
+
+   shutdown
+           Stop the authoritative DNS server.
+
+Data Source Backends
+
+  Note
+
+   For the development prototype release, b10-auth supports a SQLite3 data
+   source backend and in-memory data source backend. Upcoming versions will
+   be able to use multiple different data sources, such as MySQL and Berkeley
+   DB.
+
+   By default, the SQLite3 backend uses the data file located at
+   /usr/local/var/bind10-devel/zone.sqlite3. (The full path is what was
+   defined at build configure time for --localstatedir. The default is
+   /usr/local/var/.) This data file location may be changed by defining the
+   "database_file" configuration.
+
+Loading Master Zones Files
+
+   RFC 1035 style DNS master zone files may be imported into a BIND 10 data
+   source by using the b10-loadzone utility.
+
+   b10-loadzone supports the following special directives (control entries):
+
+   $INCLUDE
+           Loads an additional zone file. This may be recursive.
+
+   $ORIGIN
+           Defines the relative domain name.
+
+   $TTL
+           Defines the time-to-live value used for following records that
+           don't include a TTL.
+
+   The -o argument may be used to define the default origin for loaded zone
+   file records.
+
+  Note
+
+   In the development prototype release, only the SQLite3 back end is used.
+   By default, it stores the zone data in
+   /usr/local/var/bind10-devel/zone.sqlite3 unless the -d switch is used to
+   set the database filename. Multiple zones are stored in a single SQLite3
+   zone database.
+
+   If you reload a zone already existing in the database, all records from
+   that prior zone disappear and a whole new set appears.
+
+Chapter 9. Incoming Zone Transfers
+
+   Table of Contents
+
+   Configuration for Incoming Zone Transfers
+
+   Enabling IXFR
+
+   Trigger an Incoming Zone Transfer Manually
+
+   Incoming zones are transferred using the b10-xfrin process which is
+   started by bind10. When received, the zone is stored in the corresponding
+   BIND 10 data source, and its records can be served by b10-auth. In
+   combination with b10-zonemgr (for automated SOA checks), this allows the
+   BIND 10 server to provide "secondary" service.
+
+   The b10-xfrin process supports both AXFR and IXFR. Due to some
+   implementation limitations of the current development release, however, it
+   only tries AXFR by default, and care should be taken to enable IXFR.
+
+  Note
+
+   In the current development release of BIND 10, incoming zone transfers are
+   only available for SQLite3-based data sources, that is, they don't work
+   for an in-memory data source.
+
+Configuration for Incoming Zone Transfers
+
+   In practice, you need to specify a list of secondary zones to enable
+   incoming zone transfers for these zones (you can still trigger a zone
+   transfer manually, without a prior configuration (see below)).
+
+   For example, to enable zone transfers for a zone named "example.com"
+   (whose master address is assumed to be 2001:db8::53 here), run the
+   following at the bindctl prompt:
+
+ > config add Xfrin/zones
+ > config set Xfrin/zones[0]/name "example.com"
+ > config set Xfrin/zones[0]/master_addr "2001:db8::53"
+ > config commit
+
+   (We assume there has been no zone configuration before).
+
+Enabling IXFR
+
+   As noted above, b10-xfrin uses AXFR for zone transfers by default. To
+   enable IXFR for zone transfers for a particular zone, set the use_ixfr
+   configuration parameter to true. In the above example of configuration
+   sequence, you'll need to add the following before performing commit:
+
+ > config set Xfrin/zones[0]/use_ixfr true
+
+  Note
+
+   One reason why IXFR is disabled by default in the current release is
+   because it does not support automatic fallback from IXFR to AXFR when it
+   encounters a primary server that doesn't support outbound IXFR (and, not
+   many existing implementations support it). Another, related reason is that
+   it does not use AXFR even if it has no knowledge about the zone (like at
+   the very first time the secondary server is set up). IXFR requires the
+   "current version" of the zone, so obviously it doesn't work in this
+   situation and AXFR is the only workable choice. The current release of
+   b10-xfrin does not make this selection automatically. These features will
+   be implemented in a near future version, at which point we will enable
+   IXFR by default.
+
+Trigger an Incoming Zone Transfer Manually
+
+   To manually trigger a zone transfer to retrieve a remote zone, you may use
+   the bindctl utility. For example, at the bindctl prompt run:
+
+ > Xfrin retransfer zone_name="foo.example.org" master=192.0.2.99
+
+Chapter 10. Outbound Zone Transfers
+
+   The b10-xfrout process is started by bind10. When the b10-auth
+   authoritative DNS server receives an AXFR or IXFR request, b10-auth
+   internally forwards the request to b10-xfrout, which handles the rest of
+   request processing. This is used to provide primary DNS service to share
+   zones to secondary name servers. The b10-xfrout is also used to send
+   NOTIFY messages to secondary servers.
+
+   A global or per zone transfer_acl configuration can be used to control
+   accessibility of the outbound zone transfer service. By default,
+   b10-xfrout allows any clients to perform zone transfers for any zones:
+
+ > config show Xfrout/transfer_acl
+ Xfrout/transfer_acl[0]  {"action": "ACCEPT"}    any     (default)
+
+   You can change this to, for example, rejecting all transfer requests by
+   default while allowing requests for the transfer of zone "example.com"
+   from 192.0.2.1 and 2001:db8::1 as follows:
+
+ > config set Xfrout/transfer_acl[0] {"action": "REJECT"}
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},
+                                                  {"action": "ACCEPT", "from": "2001:db8::1"}]
+ > config commit
+
+  Note
+
+   In the above example the lines for transfer_acl were divided for
+   readability. In the actual input it must be on a single line.
+
+   If you want to require TSIG in access control, a separate TSIG "key ring"
+   must be configured specifically for b10-xfrout as well as a system wide
+   key ring, both containing a consistent set of keys. For example, to change
+   the previous example to allowing requests from 192.0.2.1 signed by a TSIG
+   with a key name of "key.example", you'll need to do this:
+
+ > config set tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]
+ > config commit
+
+   The first line of configuration defines a system wide key ring. This is
+   necessary because the b10-auth server also checks TSIGs and it uses the
+   system wide configuration.
+
+  Note
+
+   In a future version, b10-xfrout will also use the system wide TSIG
+   configuration. The way to specify zone specific configuration (ACLs, etc)
+   is likely to be changed, too.
+
+Chapter 11. Secondary Manager
+
+   The b10-zonemgr process is started by bind10. It keeps track of SOA
+   refresh, retry, and expire timers and other details for BIND 10 to perform
+   as a slave. When the b10-auth authoritative DNS server receives a NOTIFY
+   message, b10-zonemgr may tell b10-xfrin to do a refresh to start an
+   inbound zone transfer. The secondary manager resets its counters when a
+   new zone is transferred in.
+
+  Note
+
+   Access control (such as allowing notifies) is not yet provided. The
+   primary/secondary service is not yet complete.
+
+Chapter 12. Recursive Name Server
+
+   Table of Contents
+
+   Access Control
+
+   Forwarding
+
+   The b10-resolver process is started by bind10.
+
+   The main bind10 process can be configured to select to run either the
+   authoritative or resolver or both. By default, it starts the authoritative
+   service. You may change this using bindctl, for example:
+
+ > config remove Boss/components b10-xfrout
+ > config remove Boss/components b10-xfrin
+ > config remove Boss/components b10-auth
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+   The master bind10 will stop and start the desired services.
+
+   By default, the resolver listens on port 53 for 127.0.0.1 and ::1. The
+   following example shows how it can be configured to listen on an
+   additional address (and port):
+
+ > config add Resolver/listen_on
+ > config set Resolver/listen_on[2]/address "192.168.1.1"
+ > config set Resolver/listen_on[2]/port 53
+ > config commit
+
+   (Replace the "2" as needed; run "config show Resolver/listen_on" if
+   needed.)
+
+Access Control
+
+   By default, the b10-resolver daemon only accepts DNS queries from the
+   localhost (127.0.0.1 and ::1). The Resolver/query_acl configuration may be
+   used to reject, drop, or allow specific IPs or networks. This
+   configuration list is first match.
+
+   The configuration's action item may be set to "ACCEPT" to allow the
+   incoming query, "REJECT" to respond with a DNS REFUSED return code, or
+   "DROP" to ignore the query without any response (such as a blackhole). For
+   more information, see the respective debugging messages:
+   RESOLVER_QUERY_ACCEPTED, RESOLVER_QUERY_REJECTED, and
+   RESOLVER_QUERY_DROPPED.
+
+   The required configuration's from item is set to an IPv4 or IPv6 address,
+   addresses with a network mask, or to the special lowercase keywords
+   "any6" (for any IPv6 address) or "any4" (for any IPv4 address).
+
+   For example to allow the 192.168.1.0/24 network to use your recursive name
+   server, at the bindctl prompt run:
+
+ > config add Resolver/query_acl
+ > config set Resolver/query_acl[2]/action "ACCEPT"
+ > config set Resolver/query_acl[2]/from "192.168.1.0/24"
+ > config commit
+
+   (Replace the "2" as needed; run "config show Resolver/query_acl" if
+   needed.)
+
+  Note
+
+   This prototype access control configuration syntax may be changed.
+
+Forwarding
+
+   To enable forwarding, the upstream address and port must be configured to
+   forward queries to, such as:
+
+ > config set Resolver/forward_addresses [{ "address": "192.168.1.1", "port": 53 }]
+ > config commit
+
+   (Replace 192.168.1.1 to point to your full resolver.)
+
+   Normal iterative name service can be re-enabled by clearing the forwarding
+   address(es); for example:
+
+ > config set Resolver/forward_addresses []
+ > config commit
+
+Chapter 13. Statistics
+
+   The b10-stats process is started by bind10. It periodically collects
+   statistics data from various modules and aggregates it.
+
+   This stats daemon provides commands to identify if it is running, show
+   specified or all statistics data, show specified or all statistics data
+   schema, and set specified statistics data. For example, using bindctl:
+
+ > Stats show
+ {
+     "Auth": {
+         "queries.tcp": 1749,
+         "queries.udp": 867868
+     },
+     "Boss": {
+         "boot_time": "2011-01-20T16:59:03Z"
+     },
+     "Stats": {
+         "boot_time": "2011-01-20T16:59:05Z",
+         "last_update_time": "2011-01-20T17:04:05Z",
+         "lname": "4d3869d9_a at jreed.example.net",
+         "report_time": "2011-01-20T17:04:06Z",
+         "timestamp": 1295543046.823504
+     }
+ }
+
+
+Chapter 14. Logging
+
+   Table of Contents
+
+   Logging configuration
+
+                Loggers
+
+                Output Options
+
+                Example session
+
+   Logging Message Format
+
+Logging configuration
+
+   The logging system in BIND 10 is configured through the Logging module.
+   All BIND 10 modules will look at the configuration in Logging to see what
+   should be logged and to where.
+
+  Loggers
+
+   Within BIND 10, a message is logged through a component called a "logger".
+   Different parts of BIND 10 log messages through different loggers, and
+   each logger can be configured independently of one another.
+
+   In the Logging module, you can specify the configuration for zero or more
+   loggers; any that are not specified will take appropriate default values.
+
+   The three most important elements of a logger configuration are the name
+   (the component that is generating the messages), the severity (what to
+   log), and the output_options (where to log).
+
+    name (string)
+
+   Each logger in the system has a name, the name being that of the component
+   using it to log messages. For instance, if you want to configure logging
+   for the resolver module, you add an entry for a logger named "Resolver".
+   This configuration will then be used by the loggers in the Resolver
+   module, and all the libraries used by it.
+
+   If you want to specify logging for one specific library within the module,
+   you set the name to module.library. For example, the logger used by the
+   nameserver address store component has the full name of "Resolver.nsas".
+   If there is no entry in Logging for a particular library, it will use the
+   configuration given for the module.
+
+   To illustrate this, suppose you want the cache library to log messages of
+   severity DEBUG, and the rest of the resolver code to log messages of
+   severity INFO. To achieve this you specify two loggers, one with the name
+   "Resolver" and severity INFO, and one with the name "Resolver.cache" with
+   severity DEBUG. As there are no entries for other libraries (e.g. the
+   nsas), they will use the configuration for the module ("Resolver"), so
+   giving the desired behavior.
+
+   One special case is that of a module name of "*" (asterisk), which is
+   interpreted as any module. You can set global logging options by using
+   this, including setting the logging configuration for a library that is
+   used by multiple modules (e.g. "*.config" specifies the configuration
+   library code in whatever module is using it).
+
+   If there are multiple logger specifications in the configuration that
+   might match a particular logger, the specification with the more specific
+   logger name takes precedence. For example, if there are entries for
+   both "*" and "Resolver", the resolver module -- and all libraries it uses
+   -- will log messages according to the configuration in the second entry
+   ("Resolver"). All other modules will use the configuration of the first
+   entry ("*"). If there was also a configuration entry for "Resolver.cache",
+   the cache library within the resolver would use that in preference to the
+   entry for "Resolver".
+
+   One final note about the naming. When specifying the module name within a
+   logger, use the name of the module as specified in bindctl, e.g.
+   "Resolver" for the resolver module, "Xfrout" for the xfrout module, etc.
+   When the message is logged, the message will include the name of the
+   logger generating the message, but with the module name replaced by the
+   name of the process implementing the module (so for example, a message
+   generated by the "Auth.cache" logger will appear in the output with a
+   logger name of "b10-auth.cache").
+
+    severity (string)
+
+   This specifies the category of messages logged. Each message is logged
+   with an associated severity which may be one of the following (in
+   descending order of severity):
+
+     o FATAL
+     o ERROR
+     o WARN
+     o INFO
+     o DEBUG
+
+   When the severity of a logger is set to one of these values, it will only
+   log messages of that severity, and the severities above it. The severity
+   may also be set to NONE, in which case all messages from that logger are
+   inhibited.
+
+    output_options (list)
+
+   Each logger can have zero or more output_options. These specify where log
+   messages are sent to. These are explained in detail below.
+
+   The other options for a logger are:
+
+    debuglevel (integer)
+
+   When a logger's severity is set to DEBUG, this value specifies what debug
+   messages should be printed. It ranges from 0 (least verbose) to 99 (most
+   verbose).
+
+   If severity for the logger is not DEBUG, this value is ignored.
+
+    additive (true or false)
+
+   If this is true, the output_options from the parent will be used. For
+   example, if there are two loggers configured; "Resolver" and
+   "Resolver.cache", and additive is true in the second, it will write the
+   log messages not only to the destinations specified for "Resolver.cache",
+   but also to the destinations as specified in the output_options in the
+   logger named "Resolver".
+
+  Output Options
+
+   The main settings for an output option are the destination and a value
+   called output, the meaning of which depends on the destination that is
+   set.
+
+    destination (string)
+
+   The destination is the type of output. It can be one of:
+
+     o console
+     o file
+     o syslog
+
+    output (string)
+
+   Depending on what is set as the output destination, this value is
+   interpreted as follows:
+
+   destination is "console"
+           The value of output must be one of "stdout" (messages printed to
+           standard output) or "stderr" (messages printed to standard error).
+
+   destination is "file"
+           The value of output is interpreted as a file name; log messages
+           will be appended to this file.
+
+   destination is "syslog"
+           The value of output is interpreted as the syslog facility (e.g.
+           local0) that should be used for log messages.
+
+   The other options for output_options are:
+
+      flush (true or false)
+
+   Flush buffers after each log message. Doing this will reduce performance
+   but will ensure that if the program terminates abnormally, all messages up
+   to the point of termination are output.
+
+      maxsize (integer)
+
+   Only relevant when destination is file, this is maximum file size of
+   output files in bytes. When the maximum size is reached, the file is
+   renamed and a new file opened. (For example, a ".1" is appended to the
+   name -- if a ".1" file exists, it is renamed ".2", etc.)
+
+   If this is 0, no maximum file size is used.
+
+      maxver (integer)
+
+   Maximum number of old log files to keep around when rolling the output
+   file. Only relevant when destination is "file".
+
+  Example session
+
+   In this example we want to set the global logging to write to the file
+   /var/log/my_bind10.log, at severity WARN. We want the authoritative server
+   to log at DEBUG with debuglevel 40, to a different file
+   (/tmp/debug_messages).
+
+   Start bindctl.
+
+ ["login success "]
+ > config show Logging
+ Logging/loggers []      list
+
+   By default, no specific loggers are configured, in which case the severity
+   defaults to INFO and the output is written to stderr.
+
+   Let's first add a default logger:
+
+ > config add Logging/loggers
+ > config show Logging
+ Logging/loggers/        list    (modified)
+
+   The loggers value line changed to indicate that it is no longer an empty
+   list:
+
+ > config show Logging/loggers
+ Logging/loggers[0]/name ""      string  (default)
+ Logging/loggers[0]/severity     "INFO"  string  (default)
+ Logging/loggers[0]/debuglevel   0       integer (default)
+ Logging/loggers[0]/additive     false   boolean (default)
+ Logging/loggers[0]/output_options       []      list    (default)
+
+   The name is mandatory, so we must set it. We will also change the severity
+   as well. Let's start with the global logger.
+
+ > config set Logging/loggers[0]/name *
+ > config set Logging/loggers[0]/severity WARN
+ > config show Logging/loggers
+ Logging/loggers[0]/name "*"     string  (modified)
+ Logging/loggers[0]/severity     "WARN"  string  (modified)
+ Logging/loggers[0]/debuglevel   0       integer (default)
+ Logging/loggers[0]/additive     false   boolean (default)
+ Logging/loggers[0]/output_options       []      list    (default)
+
+   Of course, we need to specify where we want the log messages to go, so we
+   add an entry for an output option.
+
+ >  config add Logging/loggers[0]/output_options
+ >  config show Logging/loggers[0]/output_options
+ Logging/loggers[0]/output_options[0]/destination        "console"       string  (default)
+ Logging/loggers[0]/output_options[0]/output     "stdout"        string  (default)
+ Logging/loggers[0]/output_options[0]/flush      false   boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize    0       integer (default)
+ Logging/loggers[0]/output_options[0]/maxver     0       integer (default)
+
+   These aren't the values we are looking for.
+
+ >  config set Logging/loggers[0]/output_options[0]/destination file
+ >  config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log
+ >  config set Logging/loggers[0]/output_options[0]/maxsize 30000
+ >  config set Logging/loggers[0]/output_options[0]/maxver 8
+
+   Which would make the entire configuration for this logger look like:
+
+ >  config show all Logging/loggers
+ Logging/loggers[0]/name "*"     string  (modified)
+ Logging/loggers[0]/severity     "WARN"  string  (modified)
+ Logging/loggers[0]/debuglevel   0       integer (default)
+ Logging/loggers[0]/additive     false   boolean (default)
+ Logging/loggers[0]/output_options[0]/destination        "file"  string  (modified)
+ Logging/loggers[0]/output_options[0]/output     "/var/log/bind10.log"   string  (modified)
+ Logging/loggers[0]/output_options[0]/flush      false   boolean (default)
+ Logging/loggers[0]/output_options[0]/maxsize    30000   integer (modified)
+ Logging/loggers[0]/output_options[0]/maxver     8       integer (modified)
+
+   That looks OK, so let's commit it before we add the configuration for the
+   authoritative server's logger.
+
+ >  config commit
+
+   Now that we have set it, and checked each value along the way, adding a
+   second entry is quite similar.
+
+ >  config add Logging/loggers
+ >  config set Logging/loggers[1]/name Auth
+ >  config set Logging/loggers[1]/severity DEBUG
+ >  config set Logging/loggers[1]/debuglevel 40
+ >  config add Logging/loggers[1]/output_options
+ >  config set Logging/loggers[1]/output_options[0]/destination file
+ >  config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log
+ >  config commit
+
+   And that's it. Once we have found whatever it was we needed the debug
+   messages for, we can simply remove the second logger to let the
+   authoritative server use the same settings as the rest.
+
+ >  config remove Logging/loggers[1]
+ >  config commit
+
+   And every module will now be using the values from the logger named "*".
+
+Logging Message Format
+
+   Each message written by BIND 10 to the configured logging destinations
+   comprises a number of components that identify the origin of the message
+   and, if the message indicates a problem, information about the problem
+   that may be useful in fixing it.
+
+   Consider the message below logged to a file:
+
+ 2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+     ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)
+
+   Note: the layout of messages written to the system logging file (syslog)
+   may be slightly different. This message has been split across two lines
+   here for display reasons; in the logging file, it will appear on one
+   line.
+
+   The log message comprises a number of components:
+
+   2011-06-15 13:48:22.034
+
+           The date and time at which the message was generated.
+
+   ERROR
+
+           The severity of the message.
+
+   [b10-resolver.asiolink]
+
+           The source of the message. This comprises two components: the BIND
+           10 process generating the message (in this case, b10-resolver) and
+           the module within the program from which the message originated
+           (which in the example is the asynchronous I/O link module,
+           asiolink).
+
+   ASIODNS_OPENSOCK
+
+           The message identification. Every message in BIND 10 has a unique
+           identification, which can be used as an index into the BIND 10
+           Messages Manual (http://bind10.isc.org/docs/bind10-messages.html)
+           from which more information can be obtained.
+
+   error 111 opening TCP socket to 127.0.0.1(53)
+
+           A brief description of the cause of the problem. Within this text,
+           information relating to the condition that caused the message to
+           be logged will be included. In this example, error number 111 (an
+           operating system-specific error number) was encountered when
+           trying to open a TCP connection to port 53 on the local system
+           (address 127.0.0.1). The next step would be to find out the reason
+           for the failure by consulting your system's documentation to
+           identify what error number 111 means.
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 6a42182..e61725f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -41,9 +41,10 @@
       </para>
       <para>
         This is the reference guide for BIND 10 version &__VERSION__;.
-	The most up-to-date version of this document, along with
-	other documents for BIND 10, can be found at <ulink
-	url="http://bind10.isc.org/docs"/>.  </para> </abstract>
+	The most up-to-date version of this document (in PDF, HTML,
+	and plain text formats), along with other documents for
+	BIND 10, can be found at <ulink url="http://bind10.isc.org/docs"/>.
+	</para> </abstract>
 
       <releaseinfo>This is the reference guide for BIND 10 version
         &__VERSION__;.</releaseinfo>
@@ -146,7 +147,7 @@
 	The processes started by the <command>bind10</command>
 	command have names starting with "b10-", including:
       </para>
-      
+
       <para>
 
         <itemizedlist>
@@ -241,7 +242,7 @@
 
     <section id="managing_once_running">
       <title>Managing BIND 10</title>
-      
+
       <para>
 	Once BIND 10 is running, a few commands are used to interact
 	directly with the system:
@@ -280,7 +281,7 @@
 <!-- TODO point to these -->
       In addition, manual pages are also provided in the default installation.
     </para>
-      
+
 <!--
 bin/
   bindctl*
@@ -387,7 +388,7 @@ Debian and Ubuntu:
       </para>
 
       <orderedlist>
-    
+
         <listitem>
           <simpara>
             Install required build dependencies.
@@ -471,7 +472,7 @@ Debian and Ubuntu:
           Downloading a release tar file is the recommended method to
           obtain the source code.
         </para>
-        
+
         <para>
           The BIND 10 releases are available as tar file downloads from
           <ulink url="ftp://ftp.isc.org/isc/bind10/"/>.
@@ -547,37 +548,37 @@ Debian and Ubuntu:
           <varlistentry>
             <term>--prefix</term>
             <listitem>
-              <simpara>Define the the installation location (the
+              <simpara>Define the installation location (the
                 default is <filename>/usr/local/</filename>).
               </simpara>
-            </listitem> 
+            </listitem>
           </varlistentry>
 
           <varlistentry>
             <term>--with-boost-include</term>
-            <listitem> 
+            <listitem>
               <simpara>Define the path to find the Boost headers.
               </simpara>
-            </listitem> 
+            </listitem>
           </varlistentry>
 
           <varlistentry>
             <term>--with-pythonpath</term>
-            <listitem> 
+            <listitem>
               <simpara>Define the path to Python 3.1 if it is not in the
                 standard execution path.
               </simpara>
-            </listitem> 
+            </listitem>
           </varlistentry>
 
           <varlistentry>
             <term>--with-gtest</term>
-            <listitem> 
+            <listitem>
               <simpara>Enable building the C++ Unit Tests using the
                 Google Tests framework. Optionally this can define the
                 path to the gtest header files and library.
               </simpara>
-            </listitem> 
+            </listitem>
           </varlistentry>
 
           </variablelist>
@@ -696,38 +697,43 @@ Debian and Ubuntu:
         </para>
       </section>
   -->
-  
+
   </chapter>
 
   <chapter id="bind10">
     <title>Starting BIND10 with <command>bind10</command></title>
     <para>
-      BIND 10 provides the <command>bind10</command> command which 
+      BIND 10 provides the <command>bind10</command> command which
       starts up the required processes.
       <command>bind10</command>
-      will also restart processes that exit unexpectedly.
+      will also restart some processes that exit unexpectedly.
       This is the only command needed to start the BIND 10 system.
     </para>
 
     <para>
       After starting the <command>b10-msgq</command> communications channel,
-      <command>bind10</command> connects to it, 
+      <command>bind10</command> connects to it,
       runs the configuration manager, and reads its own configuration.
       Then it starts the other modules.
     </para>
 
     <para>
-      The <command>b10-msgq</command> and <command>b10-cfgmgr</command>
+      The <command>b10-sockcreator</command>, <command>b10-msgq</command> and
+      <command>b10-cfgmgr</command>
       services make up the core. The <command>b10-msgq</command> daemon
       provides the communication channel between every part of the system.
       The <command>b10-cfgmgr</command> daemon is always needed by every
       module, if only to send information about themselves somewhere,
       but more importantly to ask about their own settings, and
-      about other modules.
-      The <command>bind10</command> master process will also start up
+      about other modules. The <command>b10-sockcreator</command> will
+      allocate sockets for the rest of the system.
+    </para>
+
+    <para>
+      In its default configuration, the <command>bind10</command>
+      master process will also start up
       <command>b10-cmdctl</command> for admins to communicate with the
-      system, <command>b10-auth</command> for authoritative DNS service or
-      <command>b10-resolver</command> for recursive name service,
+      system, <command>b10-auth</command> for authoritative DNS service,
       <command>b10-stats</command> for statistics collection,
       <command>b10-xfrin</command> for inbound DNS zone transfers,
       <command>b10-xfrout</command> for outbound DNS zone transfers,
@@ -742,6 +748,169 @@ Debian and Ubuntu:
         get additional debugging or diagnostic output.
       </para>
 <!-- TODO: note it doesn't go into background -->
+
+      <note>
+        <para>
+          If the setproctitle Python module is detected at start up,
+          the process names for the Python-based daemons will be renamed
+          to better identify them instead of just <quote>python</quote>.
+          This is not needed on some operating systems.
+        </para>
+      </note>
+
+    </section>
+    <section id="bind10.config">
+      <title>Configuration of started processes</title>
+      <para>
+        The processes to be started can be configured, with the exception
+        of the <command>b10-sockcreator</command>, <command>b10-msgq</command>
+        and <command>b10-cfgmgr</command>.
+      </para>
+
+      <para>
+        The configuration is in the Boss/components section. Each element
+        represents one component, which is an abstraction of a process
+        (currently there's also one component which doesn't represent
+        a process). If you didn't want to transfer out at all (your server
+        is a slave only), you would just remove the corresponding component
+        from the set, like this and the process would be stopped immediately
+        (and not started on the next startup):
+      <screen>> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config commit</userinput></screen>
+      </para>
+
+      <para>
+        To add a process to the set, let's say the resolver (which is not started
+        by default), you would do this:
+        <screen>> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config commit</userinput></screen></para>
+
+      <para>
+        Now, what it means. We add an entry called b10-resolver. It is both a
+        name used to reference this component in the configuration and the
+        name of the process to start. Then we set some parameters on how to
+        start it.
+      </para>
+
+      <para>
+        The special one is for components that need some kind of special care
+        during startup or shutdown. Unless specified, the component is started
+        in the usual way. This is the list of components that need to be started
+        in a special way, with the value of special used for them:
+        <table>
+          <tgroup cols='3' align='left'>
+          <colspec colname='component'/>
+          <colspec colname='special'/>
+          <colspec colname='description'/>
+          <thead><row><entry>Component</entry><entry>Special</entry><entry>Description</entry></row></thead>
+          <tbody>
+            <row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative server</entry></row>
+            <row><entry>b10-resolver</entry><entry>resolver</entry><entry>The resolver</entry></row>
+            <row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>The command control (remote control interface)</entry></row>
+            <row><entry>setuid</entry><entry>setuid</entry><entry>Virtual component, see below</entry></row>
+            <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+          </tbody>
+          </tgroup>
+        </table>
+      </para>
+
+      <para>
+	The kind specifies how a failure of the component should
+	be handled.  If it is set to <quote>dispensable</quote>
+	(the default unless you set something else), it will get
+	started again if it fails. If it is set to <quote>needed</quote>
+	and it fails at startup, the whole <command>bind10</command>
+	shuts down and exits with error exit code. But if it fails
+	some time later, it is just started again. If you set it
+	to <quote>core</quote>, you indicate that the system is
+	not usable without the component and if such component
+	fails, the system shuts down no matter when the failure
+	happened.  This is the behaviour of the core components
+	(the ones you can't turn off), but you can declare any
+	other components as core as well if you wish (but you can
+	turn these off, they just can't fail).
+      </para>
+
+      <para>
+        The priority defines the order in which the components should start.
+        The ones with a higher number are started sooner than the ones with
+        a lower number. If you don't set it, 0 (zero) is used as the priority.
+      </para>
+
+      <para>
+        There are other parameters we didn't use in our example.
+	One of them is <quote>address</quote>. It is the address
+	used by the component on the <command>b10-msgq</command>
+	message bus. The special components already know their
+	address, but the usual ones don't. The address is by
+	convention the thing after <emphasis>b10-</emphasis>, with
+	the first letter capital (eg. <command>b10-stats</command>
+	would have <quote>Stats</quote> as its address).
+<!-- TODO: this should be simplified so we don't even have to document it -->
+      </para>
+
+<!-- TODO: what does "The special components already know their
+address, but the usual ones don't." mean? -->
+
+<!-- TODO: document params when is enabled -->
+
+      <para>
+        The last one is process. It is the name of the process to be started.
+        It defaults to the name of the component if not set, but you can use
+        this to override it.
+      </para>
+
+      <!-- TODO Add parameters when they work, not implemented yet-->
+
+      <note>
+        <para>
+          This system allows you to start the same component multiple times
+          (by including it in the configuration with different names, but the
+          same process setting). However, the rest of the system doesn't expect
+          such situation, so it would probably not do what you want. Such
+          support is yet to be implemented.
+        </para>
+      </note>
+
+      <note>
+        <para>
+	  The configuration is quite powerful, but that includes
+	  a lot of space for mistakes. You could turn off the
+	  <command>b10-cmdctl</command>, but then you couldn't
+	  change it back the usual way, as it would require it to
+	  be running (you would have to find and edit the configuration
+	  directly).  Also, some modules might have dependencies
+	  -- <command>b10-stats-httpd</command> needs
+	  <command>b10-stats</command>, <command>b10-xfrout</command>
+	  needs the <command>b10-auth</command> to be running, etc.
+
+<!-- TODO: should we define dependencies? -->
+
+        </para>
+        <para>
+          In short, you should think twice before disabling something here.
+        </para>
+      </note>
+
+      <para>
+	Now, to the mysterious setuid virtual component. If you
+	use the <command>-u</command> option to start the
+	<command>bind10</command> as root, but change the user
+	later, we need to start the <command>b10-auth</command> or
+	<command>b10-resolver</command> as root (until the socket
+	creator is finished).<!-- TODO --> So we need to specify
+	the time when the switch from root to the given user happens
+	and that's what the setuid component is for. The switch is
+	done at the time the setuid component would be started, if
+	it was a process. The default configuration contains the
+	setuid component with priority 5, <command>b10-auth</command>
+	has 10 to be started before the switch and everything else
+	is without priority, so it is started after the switch.
+      </para>
+
     </section>
 
   </chapter>
@@ -769,7 +938,7 @@ Debian and Ubuntu:
         <command>b10-msgq</command> service.
         It listens on 127.0.0.1.
       </para>
-      
+
 <!-- TODO: this is broken, see Trac #111
       <para>
         To select an alternate port for the <command>b10-msgq</command> to
@@ -1095,10 +1264,10 @@ since we used bind10 -->
         The configuration data item is:
 
         <variablelist>
-    
+
           <varlistentry>
             <term>database_file</term>
-            <listitem> 
+            <listitem>
               <simpara>This is an optional string to define the path to find
                  the SQLite3 database file.
 <!-- TODO: -->
@@ -1120,7 +1289,7 @@ This may be a temporary setting until then.
 
           <varlistentry>
             <term>shutdown</term>
-            <listitem> 
+            <listitem>
               <simpara>Stop the authoritative DNS server.
               </simpara>
 <!-- TODO: what happens when this is sent, will bind10 restart? -->
@@ -1176,7 +1345,7 @@ This may be a temporary setting until then.
 
           <varlistentry>
             <term>$INCLUDE</term>
-            <listitem> 
+            <listitem>
               <simpara>Loads an additional zone file. This may be recursive.
               </simpara>
             </listitem>
@@ -1184,7 +1353,7 @@ This may be a temporary setting until then.
 
           <varlistentry>
             <term>$ORIGIN</term>
-            <listitem> 
+            <listitem>
               <simpara>Defines the relative domain name.
               </simpara>
             </listitem>
@@ -1192,7 +1361,7 @@ This may be a temporary setting until then.
 
           <varlistentry>
             <term>$TTL</term>
-            <listitem> 
+            <listitem>
               <simpara>Defines the time-to-live value used for following
                 records that don't include a TTL.
               </simpara>
@@ -1247,21 +1416,80 @@ TODO
     <para>
       Incoming zones are transferred using the <command>b10-xfrin</command>
       process which is started by <command>bind10</command>.
-      When received, the zone is stored in the BIND 10
-      data store, and its records can be served by
+      When received, the zone is stored in the corresponding BIND 10
+      data source, and its records can be served by
       <command>b10-auth</command>.
       In combination with <command>b10-zonemgr</command> (for
       automated SOA checks), this allows the BIND 10 server to
       provide <quote>secondary</quote> service.
     </para>
 
+    <para>
+      The <command>b10-xfrin</command> process supports both AXFR and
+      IXFR.  Due to some implementation limitations of the current
+      development release, however, it only tries AXFR by default,
+      and care should be taken to enable IXFR.
+    </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+
     <note><simpara>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.) 
+     In the current development release of BIND 10, incoming zone
+     transfers are only available for SQLite3-based data sources,
+     that is, they don't work for an in-memory data source.
+    </simpara></note>
 
-<!-- TODO: sqlite3 data source only? -->
+    <section>
+      <title>Configuration for Incoming Zone Transfers</title>
+      <para>
+	In practice, you need to specify a list of secondary zones to
+	enable incoming zone transfers for these zones (you can still
+	trigger a zone transfer manually, without a prior configuration
+	(see below)).
+      </para>
 
-    </simpara></note>
+      <para>
+	For example, to enable zone transfers for a zone named "example.com"
+	(whose master address is assumed to be 2001:db8::53 here),
+	run the following at the <command>bindctl</command> prompt:
+
+      <screen>> <userinput>config add Xfrin/zones</userinput>
+> <userinput>config set Xfrin/zones[0]/name "<option>example.com</option>"</userinput>
+> <userinput>config set Xfrin/zones[0]/master_addr "<option>2001:db8::53</option>"</userinput>
+> <userinput>config commit</userinput></screen>
+
+      (We assume there has been no zone configuration before).
+      </para>
+    </section>
+
+    <section>
+      <title>Enabling IXFR</title>
+      <para>
+        As noted above, <command>b10-xfrin</command> uses AXFR for
+        zone transfers by default.  To enable IXFR for zone transfers
+        for a particular zone, set the <userinput>use_ixfr</userinput>
+        configuration parameter to <userinput>true</userinput>.
+        In the above example of configuration sequence, you'll need
+        to add the following before performing <userinput>commit</userinput>:
+      <screen>> <userinput>config set Xfrin/zones[0]/use_ixfr true</userinput></screen>
+      </para>
+
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
+      <note><simpara>
+      One reason why IXFR is disabled by default in the current
+      release is because it does not support automatic fallback from IXFR to
+      AXFR when it encounters a primary server that doesn't support
+      outbound IXFR (and, not many existing implementations support
+      it).  Another, related reason is that it does not use AXFR even
+      if it has no knowledge about the zone (like at the very first
+      time the secondary server is set up).  IXFR requires the
+      "current version" of the zone, so obviously it doesn't work
+      in this situation and AXFR is the only workable choice.
+      The current release of <command>b10-xfrin</command> does not
+      make this selection automatically.
+      These features will be implemented in a near future
+      version, at which point we will enable IXFR by default.
+      </simpara></note>
+    </section>
 
 <!-- TODO:
 
@@ -1274,13 +1502,18 @@ what if a NOTIFY is sent?
 
 -->
 
-    <para>
-       To manually trigger a zone transfer to retrieve a remote zone,
-       you may use the <command>bindctl</command> utility.
-       For example, at the <command>bindctl</command> prompt run:
+    <section>
+      <title>Trigger an Incoming Zone Transfer Manually</title>
+
+      <para>
+	To manually trigger a zone transfer to retrieve a remote zone,
+	you may use the <command>bindctl</command> utility.
+	For example, at the <command>bindctl</command> prompt run:
+
+	<screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
+      </para>
+    </section>
 
-       <screen>> <userinput>Xfrin retransfer zone_name="<option>foo.example.org</option>" master=<option>192.0.2.99</option></userinput></screen>
-    </para>
 
 <!-- TODO: can that retransfer be used to identify a new zone? -->
 <!-- TODO: what if doesn't exist at that master IP? -->
@@ -1294,20 +1527,72 @@ what if a NOTIFY is sent?
       The <command>b10-xfrout</command> process is started by
       <command>bind10</command>.
       When the <command>b10-auth</command> authoritative DNS server
-      receives an AXFR request, <command>b10-xfrout</command>
-      sends the zone.
-      This is used to provide master DNS service to share zones
+      receives an AXFR or IXFR request, <command>b10-auth</command>
+      internally forwards the request to <command>b10-xfrout</command>,
+      which handles the rest of request processing.
+      This is used to provide primary DNS service to share zones
       to secondary name servers.
       The <command>b10-xfrout</command> is also used to send
-      NOTIFY messages to slaves.
+      NOTIFY messages to secondary servers.
     </para>
 
+    <para>
+      A global or per zone <option>transfer_acl</option> configuration
+      can be used to control accessibility of the outbound zone
+      transfer service.
+      By default, <command>b10-xfrout</command> allows any clients to
+      perform zone transfers for any zones:
+    </para>
+
+      <screen>> <userinput>config show Xfrout/transfer_acl</userinput>
+Xfrout/transfer_acl[0]	{"action": "ACCEPT"}	any	(default)</screen>
+
+    <para>
+      You can change this to, for example, rejecting all transfer
+      requests by default while allowing requests for the transfer
+      of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+    </para>
+
+      <screen>> <userinput>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</userinput>
+> <userinput>config add Xfrout/zone_config</userinput>
+> <userinput>config set Xfrout/zone_config[0]/origin "example.com"</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</userinput>
+<userinput>                                                 {"action": "ACCEPT", "from": "2001:db8::1"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
     <note><simpara>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.) 
-     Access control is not yet provided.
+	In the above example the lines
+	for <option>transfer_acl</option> were divided for
+	readability.  In the actual input it must be in a single line.
     </simpara></note>
 
+    <para>
+      If you want to require TSIG in access control, a separate TSIG
+      "key ring" must be configured specifically
+      for <command>b10-xfrout</command> as well as a system wide
+      key ring, both containing a consistent set of keys.
+      For example, to change the previous example to allowing requests
+      from 192.0.2.1 signed by a TSIG with a key name of
+      "key.example", you'll need to do this:
+    </para>
+
+    <screen>> <userinput>config set tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
+    <para>
+      The first line of configuration defines a system wide key ring.
+      This is necessary because the <command>b10-auth</command> server
+      also checks TSIGs and it uses the system wide configuration.
+    </para>
+
+    <note><simpara>
+	In a future version, <command>b10-xfrout</command> will also
+	use the system wide TSIG configuration.
+	The way to specify zone specific configuration (ACLs, etc) is
+	likely to be changed, too.
+    </simpara></note>
 
 <!--
 TODO:
@@ -1360,15 +1645,20 @@ what is XfroutClient xfr_client??
 
     <para>
       The main <command>bind10</command> process can be configured
-      to select to run either the authoritative or resolver.
+      to select to run either the authoritative or resolver or both.
       By default, it starts the authoritative service.
 <!-- TODO: later both -->
 
       You may change this using <command>bindctl</command>, for example:
 
       <screen>
-> <userinput>config set Boss/start_auth false</userinput>
-> <userinput>config set Boss/start_resolver true</userinput>
+> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config remove Boss/components b10-xfrin</userinput>
+> <userinput>config remove Boss/components b10-auth</userinput>
+> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
 > <userinput>config commit</userinput>
 </screen>
 
@@ -1380,16 +1670,85 @@ what is XfroutClient xfr_client??
     </para>
 
     <para>
-      The resolver also needs to be configured to listen on an address
-      and port:
+      By default, the resolver listens on port 53 for 127.0.0.1 and ::1.
+      The following example shows how it can be configured to
+      listen on an additional address (and port):
 
       <screen>
-> <userinput>config set Resolver/listen_on [{ "address": "127.0.0.1", "port": 53 }]</userinput>
+> <userinput>config add Resolver/listen_on</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/address "192.168.1.1"</userinput>
+> <userinput>config set Resolver/listen_on[<replaceable>2</replaceable>]/port 53</userinput>
 > <userinput>config commit</userinput>
 </screen>
     </para>
 
-<!-- TODO: later the above will have some defaults -->
+     <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+       as needed; run <quote><userinput>config show
+       Resolver/listen_on</userinput></quote> if needed.)</simpara>
+<!-- TODO: this example should not include the port, ticket #1185 -->
+
+    <section>
+      <title>Access Control</title>
+
+      <para>
+        By default, the <command>b10-resolver</command> daemon only accepts
+        DNS queries from the localhost (127.0.0.1 and ::1).
+        The <option>Resolver/query_acl</option> configuration may
+	be used to reject, drop, or allow specific IPs or networks.
+        This configuration list is first match.
+      </para>
+
+      <para>
+	The configuration's <option>action</option> item may be
+	set to <quote>ACCEPT</quote> to allow the incoming query,
+	<quote>REJECT</quote> to respond with a DNS REFUSED return
+	code, or <quote>DROP</quote> to ignore the query without
+	any response (such as a blackhole).  For more information,
+	see the respective debugging messages:  <ulink
+	url="bind10-messages.html#RESOLVER_QUERY_ACCEPTED">RESOLVER_QUERY_ACCEPTED</ulink>,
+	<ulink
+	url="bind10-messages.html#RESOLVER_QUERY_REJECTED">RESOLVER_QUERY_REJECTED</ulink>,
+	and <ulink
+url="bind10-messages.html#RESOLVER_QUERY_DROPPED">RESOLVER_QUERY_DROPPED</ulink>.
+      </para>
+
+      <para>
+	The required configuration's <option>from</option> item is set
+        to an IPv4 or IPv6 address, addresses with a network mask, or to
+	the special lowercase keywords <quote>any6</quote> (for
+	any IPv6 address) or <quote>any4</quote> (for any IPv4
+	address).
+      </para>
+
+<!-- TODO:
+/0 is for any address in that address family
+does that need any address too?
+
+TODO: tsig
+-->
+
+      <para>
+	For example to allow the <replaceable>192.168.1.0/24</replaceable>
+	network to use your recursive name server, at the
+	<command>bindctl</command> prompt run:
+      </para>
+
+      <screen>
+> <userinput>config add Resolver/query_acl</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/action "ACCEPT"</userinput>
+> <userinput>config set Resolver/query_acl[<replaceable>2</replaceable>]/from "<replaceable>192.168.1.0/24</replaceable>"</userinput>
+> <userinput>config commit</userinput>
+</screen>
+
+     <simpara>(Replace the <quote><replaceable>2</replaceable></quote>
+       as needed; run <quote><userinput>config show
+       Resolver/query_acl</userinput></quote> if needed.)</simpara>
+
+<!-- TODO: check this -->
+      <note><simpara>This prototype access control configuration
+      syntax may be changed.</simpara></note>
+
+    </section>
 
     <section>
       <title>Forwarding</title>
@@ -1443,24 +1802,30 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
 
     <para>
 
-       This stats daemon provides commands to identify if it is running,
-       show specified or all statistics data, set values, remove data,
-       and reset data.
+       This stats daemon provides commands to identify if it is
+       running, show specified or all statistics data, show specified
+       or all statistics data schema, and set specified statistics
+       data.
 
        For example, using <command>bindctl</command>:
 
        <screen>
 > <userinput>Stats show</userinput>
 {
-    "auth.queries.tcp": 1749,
-    "auth.queries.udp": 867868,
-    "bind10.boot_time": "2011-01-20T16:59:03Z",
-    "report_time": "2011-01-20T17:04:06Z",
-    "stats.boot_time": "2011-01-20T16:59:05Z",
-    "stats.last_update_time": "2011-01-20T17:04:05Z",
-    "stats.lname": "4d3869d9_a at jreed.example.net",
-    "stats.start_time": "2011-01-20T16:59:05Z",
-    "stats.timestamp": 1295543046.823504
+    "Auth": {
+        "queries.tcp": 1749,
+        "queries.udp": 867868
+    },
+    "Boss": {
+        "boot_time": "2011-01-20T16:59:03Z"
+    },
+    "Stats": {
+        "boot_time": "2011-01-20T16:59:05Z",
+        "last_update_time": "2011-01-20T17:04:05Z",
+        "lname": "4d3869d9_a at jreed.example.net",
+        "report_time": "2011-01-20T17:04:06Z",
+        "timestamp": 1295543046.823504
+    }
 }
        </screen>
     </para>
@@ -1470,61 +1835,679 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
   <chapter id="logging">
     <title>Logging</title>
 
-<!-- TODO: how to configure logging, logging destinations etc. -->
+    <section>
+      <title>Logging configuration</title>
 
-    <para>
-        Each message written by BIND 10 to the configured logging destinations
-        comprises a number of components that identify the origin of the
-        message and, if the message indicates a problem, information about the
-        problem that may be useful in fixing it.
-    </para>
+      <para>
 
-    <para>
-        Consider the message below logged to a file:
-        <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
-    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
-    </para>
+	The logging system in BIND 10 is configured through the
+	Logging module. All BIND 10 modules will look at the
+	configuration in Logging to see what should be logged and
+	to where.
 
-    <para>
-      Note: the layout of messages written to the system logging
-      file (syslog) may be slightly different.  This message has
-      been split across two lines here for display reasons; in the
-      logging file, it will appear on one line.)
-    </para>
+<!-- TODO: what is context of Logging module for readers of this guide? -->
 
-    <para>
-      The log message comprises a number of components:
+      </para>
+
+      <section>
+        <title>Loggers</title>
+
+        <para>
+
+	  Within BIND 10, a message is logged through a component
+	  called a "logger". Different parts of BIND 10 log messages
+	  through different loggers, and each logger can be configured
+	  independently of one another.
+
+        </para>
+
+        <para>
+
+	  In the Logging module, you can specify the configuration
+	  for zero or more loggers; any that are not specified will
+	  take appropriate default values.
+
+        </para>
+
+        <para>
+
+	  The three most important elements of a logger configuration
+	  are the <option>name</option> (the component that is
+	  generating the messages), the <option>severity</option>
+	  (what to log), and the <option>output_options</option>
+	  (where to log).
+
+        </para>
+
+        <section>
+          <title>name (string)</title>
+
+          <para>
+	  Each logger in the system has a name, the name being that
+	  of the component using it to log messages. For instance,
+	  if you want to configure logging for the resolver module,
+	  you add an entry for a logger named <quote>Resolver</quote>. This
+	  configuration will then be used by the loggers in the
+	  Resolver module, and all the libraries used by it.
+              </para>
+
+<!-- TODO: later we will have a way to know names of all modules
+
+Right now you can only see what their names are if they are running
+(a simple 'help' without anything else in bindctl for instance).
+
+ -->
+
+        <para>
+
+	  If you want to specify logging for one specific library
+	  within the module, you set the name to
+	  <replaceable>module.library</replaceable>.  For example, the
+	  logger used by the nameserver address store component
+	  has the full name of <quote>Resolver.nsas</quote>. If
+	  there is no entry in Logging for a particular library,
+	  it will use the configuration given for the module.
+
+<!-- TODO: how to know these specific names?
+
+We will either have to document them or tell the administrator to
+specify module-wide logging and see what appears...
+
+-->
+
+        </para>
+
+        <para>
+
+<!-- TODO: severity has not been covered yet -->
+
+	  To illustrate this, suppose you want the cache library
+	  to log messages of severity DEBUG, and the rest of the
+	  resolver code to log messages of severity INFO. To achieve
+	  this you specify two loggers, one with the name
+	  <quote>Resolver</quote> and severity INFO, and one with
+	  the name <quote>Resolver.cache</quote> with severity
+	  DEBUG. As there are no entries for other libraries (e.g.
+	  the nsas), they will use the configuration for the module
+	  (<quote>Resolver</quote>), so giving the desired behavior.
+
+        </para>
+
+        <para>
+
+	  One special case is that of a module name of <quote>*</quote>
+	  (asterisks), which is interpreted as <emphasis>any</emphasis>
+	  module. You can set global logging options by using this,
+	  including setting the logging configuration for a library
+	  that is used by multiple modules (e.g. <quote>*.config</quote>
+	  specifies the configuration library code in whatever
+	  module is using it).
+
+        </para>
+
+        <para>
+
+	  If there are multiple logger specifications in the
+	  configuration that might match a particular logger, the
+	  specification with the more specific logger name takes
+	  precedence. For example, if there are entries for
+	  both <quote>*</quote> and <quote>Resolver</quote>, the
+	  resolver module — and all libraries it uses —
+	  will log messages according to the configuration in the
+	  second entry (<quote>Resolver</quote>). All other modules
+	  will use the configuration of the first entry
+	  (<quote>*</quote>). If there was also a configuration
+	  entry for <quote>Resolver.cache</quote>, the cache library
+	  within the resolver would use that in preference to the
+	  entry for <quote>Resolver</quote>.
+
+        </para>
+
+        <para>
+
+	  One final note about the naming. When specifying the
+	  module name within a logger, use the name of the module
+	  as specified in <command>bindctl</command>, e.g.
+	  <quote>Resolver</quote> for the resolver module,
+	  <quote>Xfrout</quote> for the xfrout module, etc. When
+	  the message is logged, the message will include the name
+	  of the logger generating the message, but with the module
+	  name replaced by the name of the process implementing
+	  the module (so for example, a message generated by the
+	  <quote>Auth.cache</quote> logger will appear in the output
+	  with a logger name of <quote>b10-auth.cache</quote>).
+
+        </para>
+
+        </section>
+
+        <section>
+          <title>severity (string)</title>
+
+        <para>
+
+          This specifies the category of messages logged.
+	  Each message is logged with an associated severity which
+	  may be one of the following (in descending order of
+	  severity):
+        </para>
+
+        <itemizedlist>
+          <listitem>
+            <simpara> FATAL </simpara>
+          </listitem>
+
+          <listitem>
+            <simpara> ERROR </simpara>
+          </listitem>
+
+          <listitem>
+            <simpara> WARN </simpara>
+          </listitem>
+
+          <listitem>
+            <simpara> INFO </simpara>
+          </listitem>
+
+          <listitem>
+            <simpara> DEBUG </simpara>
+          </listitem>
+        </itemizedlist>
+
+        <para>
+
+	  When the severity of a logger is set to one of these
+	  values, it will only log messages of that severity, and
+	  the severities above it. The severity may also be set to
+	  NONE, in which case all messages from that logger are
+	  inhibited.
+
+<!-- TODO: worded wrong? If I set to INFO, why would it show DEBUG which is literally below in that list? -->
+
+        </para>
+
+        </section>
+
+        <section>
+          <title>output_options (list)</title>
+
+        <para>
+
+	  Each logger can have zero or more
+	  <option>output_options</option>. These specify where log
+	  messages are sent to. These are explained in detail below.
+
+        </para>
+
+        <para>
+
+          The other options for a logger are:
+
+        </para>
+
+        </section>
+
+        <section>
+          <title>debuglevel (integer)</title>
+
+        <para>
+
+	  When a logger's severity is set to DEBUG, this value
+	  specifies what debug messages should be printed. It ranges
+	  from 0 (least verbose) to 99 (most verbose).
+        </para>
+
+
+<!-- TODO: complete this sentence:
+
+	  The general classification of debug message types is
+
+TODO; there's a ticket to determine these levels, see #1074
+
+ -->
+
+        <para>
+
+          If severity for the logger is not DEBUG, this value is ignored.
+
+        </para>
+
+        </section>
+
+        <section>
+          <title>additive (true or false)</title>
+
+        <para>
+
+	  If this is true, the <option>output_options</option> from
+	  the parent will be used. For example, if there are two
+	  loggers configured; <quote>Resolver</quote> and
+	  <quote>Resolver.cache</quote>, and <option>additive</option>
+	  is true in the second, it will write the log messages
+	  not only to the destinations specified for
+	  <quote>Resolver.cache</quote>, but also to the destinations
+	  as specified in the <option>output_options</option> in
+	  the logger named <quote>Resolver</quote>.
+
+<!-- TODO: check this -->
+
+      </para>
+
+      </section>
+
+      </section>
+
+      <section>
+        <title>Output Options</title>
+
+        <para>
+
+	  The main settings for an output option are the
+	  <option>destination</option> and a value called
+	  <option>output</option>, the meaning of which depends on
+	  the destination that is set.
+
+        </para>
+
+        <section>
+          <title>destination (string)</title>
+
+          <para>
+
+            The destination is the type of output. It can be one of:
+
+          </para>
+
+          <itemizedlist>
+
+            <listitem>
+              <simpara> console </simpara>
+          </listitem>
+
+            <listitem>
+              <simpara> file </simpara>
+          </listitem>
+
+            <listitem>
+              <simpara> syslog </simpara>
+            </listitem>
+
+          </itemizedlist>
+
+        </section>
+
+        <section>
+          <title>output (string)</title>
+
+        <para>
+
+	  Depending on what is set as the output destination, this
+	  value is interpreted as follows:
+
+        </para>
 
         <variablelist>
-        <varlistentry>
-        <term>2011-06-15 13:48:22.034</term>
-        <listitem><para>
-            The date and time at which the message was generated.
-        </para></listitem>
-        </varlistentry>
-
-        <varlistentry>
-        <term>ERROR</term>
-        <listitem><para>
-            The severity of the message.
-        </para></listitem>
-        </varlistentry>
-
-        <varlistentry>
-        <term>[b10-resolver.asiolink]</term>
-        <listitem><para>
-	    The source of the message.  This comprises two components:
-	    the BIND 10 process generating the message (in this
-	    case, <command>b10-resolver</command>) and the module
-	    within the program from which the message originated
-	    (which in the example is the asynchronous I/O link
-	    module, asiolink).
-        </para></listitem>
-        </varlistentry>
-
-        <varlistentry>
-        <term>ASIODNS_OPENSOCK</term>
-        <listitem><para>
+
+          <varlistentry>
+            <term><option>destination</option> is <quote>console</quote></term>
+            <listitem>
+              <simpara>
+		 The value of output must be one of <quote>stdout</quote>
+		 (messages printed to standard output) or
+		 <quote>stderr</quote> (messages printed to standard
+		 error).
+              </simpara>
+            </listitem>
+          </varlistentry>
+
+          <varlistentry>
+            <term><option>destination</option> is <quote>file</quote></term>
+            <listitem>
+              <simpara>
+		The value of output is interpreted as a file name;
+		log messages will be appended to this file.
+              </simpara>
+            </listitem>
+          </varlistentry>
+
+          <varlistentry>
+            <term><option>destination</option> is <quote>syslog</quote></term>
+            <listitem>
+              <simpara>
+		The value of output is interpreted as the
+		<command>syslog</command> facility (e.g.
+		<emphasis>local0</emphasis>) that should be used
+		for log messages.
+              </simpara>
+            </listitem>
+          </varlistentry>
+
+        </variablelist>
+
+        <para>
+
+          The other options for <option>output_options</option> are:
+
+        </para>
+
+        <section>
+          <title>flush (true or false)</title>
+
+          <para>
+	    Flush buffers after each log message. Doing this will
+	    reduce performance but will ensure that if the program
+	    terminates abnormally, all messages up to the point of
+	    termination are output.
+          </para>
+
+        </section>
+
+        <section>
+          <title>maxsize (integer)</title>
+
+          <para>
+	    Only relevant when destination is file, this is the maximum
+	    file size of output files in bytes. When the maximum
+	    size is reached, the file is renamed and a new file opened.
+	    (For example, a ".1" is appended to the name —
+	    if a ".1" file exists, it is renamed ".2",
+            etc.)
+          </para>
+
+          <para>
+            If this is 0, no maximum file size is used.
+          </para>
+
+        </section>
+
+        <section>
+          <title>maxver (integer)</title>
+
+          <para>
+	    Maximum number of old log files to keep around when
+	    rolling the output file. Only relevant when
+	    <option>destination</option> is <quote>file</quote>.
+          </para>
+
+        </section>
+
+      </section>
+
+      </section>
+
+      <section>
+        <title>Example session</title>
+
+        <para>
+
+	  In this example we want to set the global logging to
+	  write to the file <filename>/var/log/my_bind10.log</filename>,
+	  at severity WARN. We want the authoritative server to
+	  log at DEBUG with debuglevel 40, to a different file
+	  (<filename>/tmp/debug_messages</filename>).
+
+        </para>
+
+        <para>
+
+          Start <command>bindctl</command>.
+
+        </para>
+
+        <para>
+
+           <screen>["login success "]
+> <userinput>config show Logging</userinput>
+Logging/loggers	[]	list
+</screen>
+
+        </para>
+
+        <para>
+
+	  By default, no specific loggers are configured, in which
+	  case the severity defaults to INFO and the output is
+	  written to stderr.
+
+        </para>
+
+        <para>
+
+          Let's first add a default logger:
+
+        </para>
+
+<!-- TODO: adding the empty loggers makes no sense -->
+        <para>
+
+          <screen><userinput>> config add Logging/loggers</userinput>
+> <userinput>config show Logging</userinput>
+Logging/loggers/	list	(modified)
+</screen>
+
+        </para>
+
+        <para>
+
+	  The loggers value line changed to indicate that it is no
+	  longer an empty list:
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name	""	string	(default)
+Logging/loggers[0]/severity	"INFO"	string	(default)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options	[]	list	(default)
+</screen>
+
+        </para>
+
+        <para>
+
+	  The name is mandatory, so we must set it. We will also
+	  change the severity as well. Let's start with the global
+	  logger.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput>config set Logging/loggers[0]/name *</userinput>
+> <userinput>config set Logging/loggers[0]/severity WARN</userinput>
+> <userinput>config show Logging/loggers</userinput>
+Logging/loggers[0]/name	"*"	string	(modified)
+Logging/loggers[0]/severity	"WARN"	string	(modified)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options	[]	list	(default)
+</screen>
+
+        </para>
+
+        <para>
+
+	  Of course, we need to specify where we want the log
+	  messages to go, so we add an entry for an output option.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config add Logging/loggers[0]/output_options</userinput>
+> <userinput> config show Logging/loggers[0]/output_options</userinput>
+Logging/loggers[0]/output_options[0]/destination	"console"	string	(default)
+Logging/loggers[0]/output_options[0]/output	"stdout"	string	(default)
+Logging/loggers[0]/output_options[0]/flush	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/maxsize	0	integer	(default)
+Logging/loggers[0]/output_options[0]/maxver	0	integer	(default)
+</screen>
+
+
+        </para>
+
+        <para>
+
+          These aren't the values we are looking for.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config set Logging/loggers[0]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/output /var/log/bind10.log</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxsize 30000</userinput>
+> <userinput> config set Logging/loggers[0]/output_options[0]/maxver 8</userinput>
+</screen>
+
+        </para>
+
+        <para>
+
+	  Which would make the entire configuration for this logger
+	  look like:
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config show all Logging/loggers</userinput>
+Logging/loggers[0]/name	"*"	string	(modified)
+Logging/loggers[0]/severity	"WARN"	string	(modified)
+Logging/loggers[0]/debuglevel	0	integer	(default)
+Logging/loggers[0]/additive	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/destination	"file"	string	(modified)
+Logging/loggers[0]/output_options[0]/output	"/var/log/bind10.log"	string	(modified)
+Logging/loggers[0]/output_options[0]/flush	false	boolean	(default)
+Logging/loggers[0]/output_options[0]/maxsize	30000	integer	(modified)
+Logging/loggers[0]/output_options[0]/maxver	8	integer	(modified)
+</screen>
+
+        </para>
+
+        <para>
+
+	  That looks OK, so let's commit it before we add the
+	  configuration for the authoritative server's logger.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config commit</userinput></screen>
+
+        </para>
+
+        <para>
+
+	  Now that we have set it, and checked each value along
+	  the way, adding a second entry is quite similar.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config add Logging/loggers</userinput>
+> <userinput> config set Logging/loggers[1]/name Auth</userinput>
+> <userinput> config set Logging/loggers[1]/severity DEBUG</userinput>
+> <userinput> config set Logging/loggers[1]/debuglevel 40</userinput>
+> <userinput> config add Logging/loggers[1]/output_options</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/destination file</userinput>
+> <userinput> config set Logging/loggers[1]/output_options[0]/output /tmp/auth_debug.log</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+        </para>
+
+        <para>
+
+	  And that's it. Once we have found whatever it was we
+	  needed the debug messages for, we can simply remove the
+	  second logger to let the authoritative server use the
+	  same settings as the rest.
+
+        </para>
+
+        <para>
+
+          <screen>> <userinput> config remove Logging/loggers[1]</userinput>
+> <userinput> config commit</userinput>
+</screen>
+
+        </para>
+
+        <para>
+
+	  And every module will now be using the values from the
+	  logger named <quote>*</quote>.
+
+        </para>
+
+      </section>
+
+    </section>
+
+    <section>
+      <title>Logging Message Format</title>
+
+      <para>
+	  Each message written by BIND 10 to the configured logging
+	  destinations comprises a number of components that identify
+	  the origin of the message and, if the message indicates
+	  a problem, information about the problem that may be
+	  useful in fixing it.
+      </para>
+
+      <para>
+          Consider the message below logged to a file:
+          <screen>2011-06-15 13:48:22.034 ERROR [b10-resolver.asiolink]
+    ASIODNS_OPENSOCK error 111 opening TCP socket to 127.0.0.1(53)</screen>
+      </para>
+
+      <para>
+        Note: the layout of messages written to the system logging
+        file (syslog) may be slightly different.  (This message has
+        been split across two lines here for display reasons; in the
+        logging file, it will appear on one line.)
+      </para>
+
+      <para>
+        The log message comprises a number of components:
+
+          <variablelist>
+          <varlistentry>
+          <term>2011-06-15 13:48:22.034</term>
+<!-- TODO: timestamp repeated even if using syslog? -->
+          <listitem><para>
+              The date and time at which the message was generated.
+          </para></listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>ERROR</term>
+          <listitem><para>
+              The severity of the message.
+          </para></listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>[b10-resolver.asiolink]</term>
+          <listitem><para>
+            The source of the message.  This comprises two components:
+            the BIND 10 process generating the message (in this
+            case, <command>b10-resolver</command>) and the module
+            within the program from which the message originated
+            (which in the example is the asynchronous I/O link
+            module, asiolink).
+          </para></listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>ASIODNS_OPENSOCK</term>
+          <listitem><para>
 	    The message identification.  Every message in BIND 10
 	    has a unique identification, which can be used as an
 	    index into the <ulink
@@ -1532,25 +2515,29 @@ then change those defaults with config set Resolver/forward_addresses[0]/address
 	    Manual</citetitle></ulink> (<ulink
 	    url="http://bind10.isc.org/docs/bind10-messages.html"
 	    />) from which more information can be obtained.
-        </para></listitem>
-        </varlistentry>
-
-        <varlistentry>
-        <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
-        <listitem><para>
-            A brief description of the cause of the problem.  Within this text,
-            information relating to the condition that caused the message to
-            be logged will be included.  In this example, error number 111
-            (an operating system-specific error number) was encountered when
-            trying to open a TCP connection to port 53 on the local system
-            (address 127.0.0.1).  The next step would be to find out the reason
-            for the failure by consulting your system's documentation to
-            identify what error number 111 means.
-        </para></listitem>
-        </varlistentry>
-        </variablelist>
+          </para></listitem>
+          </varlistentry>
+
+          <varlistentry>
+          <term>error 111 opening TCP socket to 127.0.0.1(53)</term>
+          <listitem><para>
+	      A brief description of the cause of the problem.
+	      Within this text, information relating to the condition
+	      that caused the message to be logged will be included.
+	      In this example, error number 111 (an operating
+	      system-specific error number) was encountered when
+	      trying to open a TCP connection to port 53 on the
+	      local system (address 127.0.0.1).  The next step
+	      would be to find out the reason for the failure by
+	      consulting your system's documentation to identify
+	      what error number 111 means.
+          </para></listitem>
+          </varlistentry>
+          </variablelist>
+      </para>
+
+    </section>
 
-    </para>
   </chapter>
 
 <!-- TODO: how to help: run unit tests, join lists, review trac tickets -->
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index b075e96..f2f57f1 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110519. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168230298903"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
-        20110519.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20111021. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+        20111021.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
 	  Internet Systems Consortium (ISC). It includes DNS libraries
 	  and modular components for controlling authoritative and
 	  recursive DNS servers.
       </p><p>
-        This is the messages manual for BIND 10 version 20110519.
+        This is the messages manual for BIND 10 version 20111021.
 	    The most up-to-date version of this document, along with
 	    other documents for BIND 10, can be found at
         <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -26,38 +26,705 @@
       For information on configuring and using BIND 10 logging,
       refer to the <a class="ulink" href="bind10-guide.html" target="_top">BIND 10 Guide</a>.
     </p></div><div class="chapter" title="Chapter 2. BIND 10 Messages"><div class="titlepage"><div><div><h2 class="title"><a name="messages"></a>Chapter 2. BIND 10 Messages</h2></div></div></div><p>
-      </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCHCOMP"></a><span class="term">ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</span></dt><dd><p>
-A debug message, this records the the upstream fetch (a query made by the
+      </p><div class="variablelist"><dl><dt><a name="ASIODNS_FETCH_COMPLETED"></a><span class="term">ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</span></dt><dd><p>
+A debug message, this records that the upstream fetch (a query made by the
 resolver on behalf of its client) to the specified address has completed.
-</p></dd><dt><a name="ASIODNS_FETCHSTOP"></a><span class="term">ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_FETCH_STOPPED"></a><span class="term">ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</span></dt><dd><p>
 An external component has requested the halting of an upstream fetch.  This
 is an allowed operation, and the message should only appear if debug is
 enabled.
-</p></dd><dt><a name="ASIODNS_OPENSOCK"></a><span class="term">ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_OPEN_SOCKET"></a><span class="term">ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</span></dt><dd><p>
 The asynchronous I/O code encountered an error when trying to open a socket
 of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
 message.
-</p></dd><dt><a name="ASIODNS_RECVSOCK"></a><span class="term">ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_RECVTMO"></a><span class="term">ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
+</p></dd><dt><a name="ASIODNS_READ_DATA"></a><span class="term">ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_READ_TIMEOUT"></a><span class="term">ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</span></dt><dd><p>
 An upstream fetch from the specified address timed out.  This may happen for
 any number of reasons and is most probably a problem at the remote server
 or a problem on the network.  The message will only appear if debug is
 enabled.
-</p></dd><dt><a name="ASIODNS_SENDSOCK"></a><span class="term">ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
-</p></dd><dt><a name="ASIODNS_UNKORIGIN"></a><span class="term">ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
-</p></dd><dt><a name="ASIODNS_UNKRESULT"></a><span class="term">ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message).  This message should
-not appear and may indicate an internal error.  Please enter a bug report.
+</p></dd><dt><a name="ASIODNS_SEND_DATA"></a><span class="term">ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</span></dt><dd><p>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_ORIGIN"></a><span class="term">ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</span></dt><dd><p>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</p></dd><dt><a name="ASIODNS_UNKNOWN_RESULT"></a><span class="term">ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</span></dt><dd><p>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message).  Please submit a bug report.
+</p></dd><dt><a name="AUTH_AXFR_ERROR"></a><span class="term">AUTH_AXFR_ERROR error handling AXFR request: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</p></dd><dt><a name="AUTH_AXFR_UDP"></a><span class="term">AUTH_AXFR_UDP AXFR query received over UDP</span></dt><dd><p>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_COMMAND_FAILED"></a><span class="term">AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</span></dt><dd><p>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_CREATED"></a><span class="term">AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has created
+the channel to the configuration manager.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_CHANNEL_STARTED"></a><span class="term">AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</span></dt><dd><p>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_CONFIG_LOAD_FAIL"></a><span class="term">AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</span></dt><dd><p>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</p></dd><dt><a name="AUTH_CONFIG_UPDATE_FAIL"></a><span class="term">AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</span></dt><dd><p>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</p></dd><dt><a name="AUTH_DATA_SOURCE"></a><span class="term">AUTH_DATA_SOURCE data source database file: %1</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</p></dd><dt><a name="AUTH_DNS_SERVICES_CREATED"></a><span class="term">AUTH_DNS_SERVICES_CREATED DNS services created</span></dt><dd><p>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_HEADER_PARSE_FAIL"></a><span class="term">AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</p></dd><dt><a name="AUTH_INVALID_STATISTICS_DATA"></a><span class="term">AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
+</p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_LOAD_ZONE"></a><span class="term">AUTH_LOAD_ZONE loaded zone %1/%2</span></dt><dd><p>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_DISABLED"></a><span class="term">AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</p></dd><dt><a name="AUTH_MEM_DATASRC_ENABLED"></a><span class="term">AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</span></dt><dd><p>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</p></dd><dt><a name="AUTH_NOTIFY_QUESTIONS"></a><span class="term">AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</p></dd><dt><a name="AUTH_NOTIFY_RRTYPE"></a><span class="term">AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</span></dt><dd><p>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</p></dd><dt><a name="AUTH_NO_STATS_SESSION"></a><span class="term">AUTH_NO_STATS_SESSION session interface for statistics is not available</span></dt><dd><p>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</p></dd><dt><a name="AUTH_NO_XFRIN"></a><span class="term">AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</span></dt><dd><p>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PARSE_ERROR"></a><span class="term">AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</p></dd><dt><a name="AUTH_PACKET_PROTOCOL_ERROR"></a><span class="term">AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</span></dt><dd><p>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</p></dd><dt><a name="AUTH_PACKET_RECEIVED"></a><span class="term">AUTH_PACKET_RECEIVED message received:\n%1</span></dt><dd><p>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</p><p>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_PROCESS_FAIL"></a><span class="term">AUTH_PROCESS_FAIL message processing failure: %1</span></dt><dd><p>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</p><p>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server.  Please open a
+bug ticket for this issue.
+</p></dd><dt><a name="AUTH_RECEIVED_COMMAND"></a><span class="term">AUTH_RECEIVED_COMMAND command '%1' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</p></dd><dt><a name="AUTH_RECEIVED_SENDSTATS"></a><span class="term">AUTH_RECEIVED_SENDSTATS command 'sendstats' received</span></dt><dd><p>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</p></dd><dt><a name="AUTH_RESPONSE_RECEIVED"></a><span class="term">AUTH_RESPONSE_RECEIVED received response message, ignoring</span></dt><dd><p>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</p></dd><dt><a name="AUTH_SEND_ERROR_RESPONSE"></a><span class="term">AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SEND_NORMAL_RESPONSE"></a><span class="term">AUTH_SEND_NORMAL_RESPONSE sending an error response (%1 bytes):\n%2</span></dt><dd><p>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</p><p>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</p></dd><dt><a name="AUTH_SERVER_CREATED"></a><span class="term">AUTH_SERVER_CREATED server created</span></dt><dd><p>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</p></dd><dt><a name="AUTH_SERVER_FAILED"></a><span class="term">AUTH_SERVER_FAILED server failed: %1</span></dt><dd><p>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</p></dd><dt><a name="AUTH_SERVER_STARTED"></a><span class="term">AUTH_SERVER_STARTED server started</span></dt><dd><p>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</p></dd><dt><a name="AUTH_SQLITE3"></a><span class="term">AUTH_SQLITE3 nothing to do for loading sqlite3</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_CREATED"></a><span class="term">AUTH_STATS_CHANNEL_CREATED STATS session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel.  It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_STATS_COMMS"></a><span class="term">AUTH_STATS_COMMS communication error in sending statistics data: %1</span></dt><dd><p>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMEOUT"></a><span class="term">AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</span></dt><dd><p>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</p></dd><dt><a name="AUTH_STATS_TIMER_DISABLED"></a><span class="term">AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</p></dd><dt><a name="AUTH_STATS_TIMER_SET"></a><span class="term">AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</span></dt><dd><p>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</p></dd><dt><a name="AUTH_UNSUPPORTED_OPCODE"></a><span class="term">AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</span></dt><dd><p>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_CREATED"></a><span class="term">AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process.  It is issued
+during server startup as an indication that the initialization is
+proceeding normally.
+</p></dd><dt><a name="AUTH_XFRIN_CHANNEL_ESTABLISHED"></a><span class="term">AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</span></dt><dd><p>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process.  It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</p></dd><dt><a name="AUTH_ZONEMGR_COMMS"></a><span class="term">AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</p></dd><dt><a name="AUTH_ZONEMGR_ERROR"></a><span class="term">AUTH_ZONEMGR_ERROR received error response from zone manager: %1</span></dt><dd><p>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</p></dd><dt><a name="BIND10_CHECK_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</span></dt><dd><p>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</p></dd><dt><a name="BIND10_COMPONENT_FAILED"></a><span class="term">BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</span></dt><dd><p>
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+</p></dd><dt><a name="BIND10_COMPONENT_RESTART"></a><span class="term">BIND10_COMPONENT_RESTART component %1 is about to restart</span></dt><dd><p>
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+</p></dd><dt><a name="BIND10_COMPONENT_START"></a><span class="term">BIND10_COMPONENT_START component %1 is starting</span></dt><dd><p>
+The named component is about to be started by the boss process.
+</p></dd><dt><a name="BIND10_COMPONENT_START_EXCEPTION"></a><span class="term">BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</span></dt><dd><p>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</p></dd><dt><a name="BIND10_COMPONENT_STOP"></a><span class="term">BIND10_COMPONENT_STOP component %1 is being stopped</span></dt><dd><p>
+A component is about to be asked to stop willingly by the boss.
+</p></dd><dt><a name="BIND10_COMPONENT_UNSATISFIED"></a><span class="term">BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</span></dt><dd><p>
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_BUILD"></a><span class="term">BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</span></dt><dd><p>
+A debug message. This indicates that the configurator is building a plan
+for how to change the configuration from the older one to the newer one.
+This does no real work yet; it just plans what needs to be done.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_PLAN_INTERRUPTED"></a><span class="term">BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</span></dt><dd><p>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RECONFIGURE"></a><span class="term">BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</span></dt><dd><p>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RUN"></a><span class="term">BIND10_CONFIGURATOR_RUN running plan of %1 tasks</span></dt><dd><p>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_START"></a><span class="term">BIND10_CONFIGURATOR_START bind10 component configurator is starting up</span></dt><dd><p>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_STOP"></a><span class="term">BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</span></dt><dd><p>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate on their own; if they fail to comply, other parts of
+the boss process will try to force them).
+</p></dd><dt><a name="BIND10_CONFIGURATOR_TASK"></a><span class="term">BIND10_CONFIGURATOR_TASK performing task %1 on %2</span></dt><dd><p>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+</p></dd><dt><a name="BIND10_INVALID_STATISTICS_DATA"></a><span class="term">BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
+</p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</p></dd><dt><a name="BIND10_KILLING_ALL_PROCESSES"></a><span class="term">BIND10_KILLING_ALL_PROCESSES killing all started processes</span></dt><dd><p>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</p></dd><dt><a name="BIND10_KILL_PROCESS"></a><span class="term">BIND10_KILL_PROCESS killing process %1</span></dt><dd><p>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES
+</p></dd><dt><a name="BIND10_MSGQ_ALREADY_RUNNING"></a><span class="term">BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</span></dt><dd><p>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED"></a><span class="term">BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</span></dt><dd><p>
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
+</p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</p></dd><dt><a name="BIND10_RECEIVED_COMMAND"></a><span class="term">BIND10_RECEIVED_COMMAND received command: %1</span></dt><dd><p>
+The boss module received a command and shall now process it. The command
+is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_NEW_CONFIGURATION"></a><span class="term">BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</span></dt><dd><p>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</p></dd><dt><a name="BIND10_RECEIVED_SIGNAL"></a><span class="term">BIND10_RECEIVED_SIGNAL received signal %1</span></dt><dd><p>
+The boss module received the given signal.
+</p></dd><dt><a name="BIND10_RESURRECTED_PROCESS"></a><span class="term">BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</span></dt><dd><p>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</p></dd><dt><a name="BIND10_RESURRECTING_PROCESS"></a><span class="term">BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</span></dt><dd><p>
+The given process has ended unexpectedly, and is now restarted.
+</p></dd><dt><a name="BIND10_SELECT_ERROR"></a><span class="term">BIND10_SELECT_ERROR error in select() call: %1</span></dt><dd><p>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</p></dd><dt><a name="BIND10_SEND_SIGKILL"></a><span class="term">BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGKILL signal to the given process.
+</p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
+The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SETUID"></a><span class="term">BIND10_SETUID setting UID to %1</span></dt><dd><p>
+The boss switches the user it runs as to the given UID.
+</p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</p></dd><dt><a name="BIND10_SHUTDOWN_COMPLETE"></a><span class="term">BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</span></dt><dd><p>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_CAUSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</span></dt><dd><p>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_INIT"></a><span class="term">BIND10_SOCKCREATOR_INIT initializing socket creator parser</span></dt><dd><p>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_KILL"></a><span class="term">BIND10_SOCKCREATOR_KILL killing the socket creator</span></dt><dd><p>
+The socket creator is being terminated the aggressive way, by sending it
+SIGKILL. This should not normally happen.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TERMINATE"></a><span class="term">BIND10_SOCKCREATOR_TERMINATE terminating socket creator</span></dt><dd><p>
+The boss module sends a request to terminate to the socket creator.
+</p></dd><dt><a name="BIND10_SOCKCREATOR_TRANSPORT_ERROR"></a><span class="term">BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</span></dt><dd><p>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</p></dd><dt><a name="BIND10_SOCKET_CREATED"></a><span class="term">BIND10_SOCKET_CREATED successfully created socket %1</span></dt><dd><p>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</p></dd><dt><a name="BIND10_SOCKET_ERROR"></a><span class="term">BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</span></dt><dd><p>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
+The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_CC"></a><span class="term">BIND10_STARTED_CC started configuration/command session</span></dt><dd><p>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
+The given process has successfully been started.
+</p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
+The given process has successfully been started, and has the given PID.
+</p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
+Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_CC"></a><span class="term">BIND10_STARTING_CC starting configuration/command session</span></dt><dd><p>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
+The boss module is starting the given process.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given port number.
+</p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT_ADDRESS"></a><span class="term">BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</span></dt><dd><p>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</p></dd><dt><a name="BIND10_STARTUP_COMPLETE"></a><span class="term">BIND10_STARTUP_COMPLETE BIND 10 started</span></dt><dd><p>
+All modules have been successfully started, and BIND 10 is now running.
+</p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</p></dd><dt><a name="BIND10_STARTUP_UNEXPECTED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+</p></dd><dt><a name="BIND10_STARTUP_UNRECOGNISED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_AUTH"></a><span class="term">BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</span></dt><dd><p>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_RESOLVER"></a><span class="term">BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</span></dt><dd><p>
+The resolver is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_STOP_PROCESS"></a><span class="term">BIND10_STOP_PROCESS asking %1 to shut down</span></dt><dd><p>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</p></dd><dt><a name="BIND10_WAIT_CFGMGR"></a><span class="term">BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</span></dt><dd><p>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+</p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</p></dd><dt><a name="CACHE_LOCALZONE_FOUND"></a><span class="term">CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</span></dt><dd><p>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</p></dd><dt><a name="CACHE_LOCALZONE_UNKNOWN"></a><span class="term">CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</span></dt><dd><p>
+Debug message. The requested data was not found in the local zone data.
+</p></dd><dt><a name="CACHE_LOCALZONE_UPDATE"></a><span class="term">CACHE_LOCALZONE_UPDATE updating local zone element at key %1</span></dt><dd><p>
+Debug message issued when there's update to the local zone section of cache.
+</p></dd><dt><a name="CACHE_MESSAGES_DEINIT"></a><span class="term">CACHE_MESSAGES_DEINIT deinitialized message cache</span></dt><dd><p>
+Debug message. It is issued when the server deinitializes the message cache.
+</p></dd><dt><a name="CACHE_MESSAGES_EXPIRED"></a><span class="term">CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</p></dd><dt><a name="CACHE_MESSAGES_FOUND"></a><span class="term">CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</span></dt><dd><p>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</p></dd><dt><a name="CACHE_MESSAGES_INIT"></a><span class="term">CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</span></dt><dd><p>
+Debug message issued when a new message cache is issued. It lists the class
+of messages it can hold and the maximum size of the cache.
+</p></dd><dt><a name="CACHE_MESSAGES_REMOVE"></a><span class="term">CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</span></dt><dd><p>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</p></dd><dt><a name="CACHE_MESSAGES_UNCACHEABLE"></a><span class="term">CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</span></dt><dd><p>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</p></dd><dt><a name="CACHE_MESSAGES_UNKNOWN"></a><span class="term">CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</span></dt><dd><p>
+Debug message. The message cache didn't find any entry for the given key.
+</p></dd><dt><a name="CACHE_MESSAGES_UPDATE"></a><span class="term">CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</span></dt><dd><p>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, new one
+is created.
+</p></dd><dt><a name="CACHE_RESOLVER_DEEPEST"></a><span class="term">CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT"></a><span class="term">CACHE_RESOLVER_INIT initializing resolver cache for class %1</span></dt><dd><p>
+Debug message. The resolver cache is being created for this given class.
+</p></dd><dt><a name="CACHE_RESOLVER_INIT_INFO"></a><span class="term">CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</span></dt><dd><p>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_MSG"></a><span class="term">CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOCAL_RRSET"></a><span class="term">CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</span></dt><dd><p>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_MSG"></a><span class="term">CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</p></dd><dt><a name="CACHE_RESOLVER_LOOKUP_RRSET"></a><span class="term">CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</span></dt><dd><p>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</p></dd><dt><a name="CACHE_RESOLVER_NO_QUESTION"></a><span class="term">CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</span></dt><dd><p>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</p></dd><dt><a name="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating a message in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</span></dt><dd><p>
+Debug message. The resolver is updating an RRset in the cache.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET"></a><span class="term">CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</span></dt><dd><p>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</p></dd><dt><a name="CACHE_RRSET_EXPIRED"></a><span class="term">CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</span></dt><dd><p>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</p></dd><dt><a name="CACHE_RRSET_INIT"></a><span class="term">CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</span></dt><dd><p>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</p></dd><dt><a name="CACHE_RRSET_UNTRUSTED"></a><span class="term">CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</span></dt><dd><p>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</p></dd><dt><a name="CACHE_RRSET_UPDATE"></a><span class="term">CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</span></dt><dd><p>
+Debug message. The RRset is updating its data with this given RRset.
+</p></dd><dt><a name="CC_ASYNC_READ_FAILED"></a><span class="term">CC_ASYNC_READ_FAILED asynchronous read failed</span></dt><dd><p>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</p></dd><dt><a name="CC_CONN_ERROR"></a><span class="term">CC_CONN_ERROR error connecting to message queue (%1)</span></dt><dd><p>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</p></dd><dt><a name="CC_DISCONNECT"></a><span class="term">CC_DISCONNECT disconnecting from message queue daemon</span></dt><dd><p>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</p></dd><dt><a name="CC_ESTABLISH"></a><span class="term">CC_ESTABLISH trying to establish connection with message queue daemon at %1</span></dt><dd><p>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</p></dd><dt><a name="CC_ESTABLISHED"></a><span class="term">CC_ESTABLISHED successfully connected to message queue daemon</span></dt><dd><p>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</p></dd><dt><a name="CC_GROUP_RECEIVE"></a><span class="term">CC_GROUP_RECEIVE trying to receive a message</span></dt><dd><p>
+Debug message, noting that a message is expected to come over the command
+channel.
+</p></dd><dt><a name="CC_GROUP_RECEIVED"></a><span class="term">CC_GROUP_RECEIVED message arrived ('%1', '%2')</span></dt><dd><p>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</p></dd><dt><a name="CC_GROUP_SEND"></a><span class="term">CC_GROUP_SEND sending message '%1' to group '%2'</span></dt><dd><p>
+Debug message, we're about to send a message over the command channel.
+</p></dd><dt><a name="CC_INVALID_LENGTHS"></a><span class="term">CC_INVALID_LENGTHS invalid length parameters (%1, %2)</span></dt><dd><p>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket make no
+sense if we interpret it as lengths of message. The first one is total length
+of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
+</p></dd><dt><a name="CC_LENGTH_NOT_READY"></a><span class="term">CC_LENGTH_NOT_READY length not ready</span></dt><dd><p>
+There should be data representing the length of message on the socket, but it
+is not there.
+</p></dd><dt><a name="CC_NO_MESSAGE"></a><span class="term">CC_NO_MESSAGE no message ready to be received yet</span></dt><dd><p>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</p></dd><dt><a name="CC_NO_MSGQ"></a><span class="term">CC_NO_MSGQ unable to connect to message queue (%1)</span></dt><dd><p>
+It isn't possible to connect to the message queue daemon, for the reason listed.
+It is unlikely any program will be able to continue without the communication.
+</p></dd><dt><a name="CC_READ_ERROR"></a><span class="term">CC_READ_ERROR error reading data from command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</p></dd><dt><a name="CC_READ_EXCEPTION"></a><span class="term">CC_READ_EXCEPTION error reading data from command channel (%1)</span></dt><dd><p>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</p></dd><dt><a name="CC_REPLY"></a><span class="term">CC_REPLY replying to message from '%1' with '%2'</span></dt><dd><p>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</p></dd><dt><a name="CC_SET_TIMEOUT"></a><span class="term">CC_SET_TIMEOUT setting timeout to %1ms</span></dt><dd><p>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</p></dd><dt><a name="CC_START_READ"></a><span class="term">CC_START_READ starting asynchronous read</span></dt><dd><p>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to correct place.
+</p></dd><dt><a name="CC_SUBSCRIBE"></a><span class="term">CC_SUBSCRIBE subscribing to communication group %1</span></dt><dd><p>
+Debug message. The program wants to receive messages addressed to this group.
+</p></dd><dt><a name="CC_TIMEOUT"></a><span class="term">CC_TIMEOUT timeout reading data from command channel</span></dt><dd><p>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</p></dd><dt><a name="CC_UNSUBSCRIBE"></a><span class="term">CC_UNSUBSCRIBE unsubscribing from communication group %1</span></dt><dd><p>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</p></dd><dt><a name="CC_WRITE_ERROR"></a><span class="term">CC_WRITE_ERROR error writing data to command channel (%1)</span></dt><dd><p>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</p></dd><dt><a name="CC_ZERO_LENGTH"></a><span class="term">CC_ZERO_LENGTH invalid message length (0)</span></dt><dd><p>
+The library received a message length being zero, which makes no sense, since
+all messages must contain at least the envelope.
+</p></dd><dt><a name="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE"></a><span class="term">CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</span></dt><dd><p>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</p></dd><dt><a name="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE"></a><span class="term">CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</span></dt><dd><p>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</p></dd><dt><a name="CFGMGR_CC_SESSION_ERROR"></a><span class="term">CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</span></dt><dd><p>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</p></dd><dt><a name="CFGMGR_DATA_READ_ERROR"></a><span class="term">CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</span></dt><dd><p>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</p></dd><dt><a name="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION"></a><span class="term">CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</span></dt><dd><p>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</p></dd><dt><a name="CFGMGR_STOPPED_BY_KEYBOARD"></a><span class="term">CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_BAD_CONFIG_DATA"></a><span class="term">CMDCTL_BAD_CONFIG_DATA error in config data: %1</span></dt><dd><p>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</p></dd><dt><a name="CMDCTL_BAD_PASSWORD"></a><span class="term">CMDCTL_BAD_PASSWORD bad password for user: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_ERROR"></a><span class="term">CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</p></dd><dt><a name="CMDCTL_CC_SESSION_TIMEOUT"></a><span class="term">CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</span></dt><dd><p>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_COMMAND_ERROR"></a><span class="term">CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</span></dt><dd><p>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</p></dd><dt><a name="CMDCTL_COMMAND_SENT"></a><span class="term">CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</span></dt><dd><p>
+This debug message indicates that the given command has been sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_NO_SUCH_USER"></a><span class="term">CMDCTL_NO_SUCH_USER username not found in user database: %1</span></dt><dd><p>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_NO_USER_ENTRIES_READ"></a><span class="term">CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</span></dt><dd><p>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</p></dd><dt><a name="CMDCTL_SEND_COMMAND"></a><span class="term">CMDCTL_SEND_COMMAND sending command %1 to module %2</span></dt><dd><p>
+This debug message indicates that the given command is being sent to
+the given module.
+</p></dd><dt><a name="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED"></a><span class="term">CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</span></dt><dd><p>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STARTED"></a><span class="term">CMDCTL_STARTED cmdctl is listening for connections on %1:%2</span></dt><dd><p>
+The cmdctl daemon has started and is now listening for connections.
+</p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="CMDCTL_UNCAUGHT_EXCEPTION"></a><span class="term">CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
+</p></dd><dt><a name="CMDCTL_USER_DATABASE_READ_ERROR"></a><span class="term">CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</span></dt><dd><p>
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
 </p></dd><dt><a name="CONFIG_CCSESSION_MSG"></a><span class="term">CONFIG_CCSESSION_MSG error in CC session message: %1</span></dt><dd><p>
 There was a problem with an incoming message on the command and control
 channel. The message does not appear to be a valid command, and is
@@ -65,77 +732,231 @@ missing a required element or contains an unknown data format. This
 most likely means that another BIND10 module is sending a bad message.
 The message itself is ignored by this module.
 </p></dd><dt><a name="CONFIG_CCSESSION_MSG_INTERNAL"></a><span class="term">CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</span></dt><dd><p>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
-</p></dd><dt><a name="CONFIG_FOPEN_ERR"></a><span class="term">CONFIG_FOPEN_ERR error opening %1: %2</span></dt><dd><p>
-There was an error opening the given file.
-</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
-There was a parse error in the JSON file. The given file does not appear
-to be in valid JSON format. Please verify that the filename is correct
-and that the contents are valid JSON.
-</p></dd><dt><a name="CONFIG_MANAGER_CONFIG"></a><span class="term">CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</span></dt><dd><p>
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</p><p>
+The most likely cause of this error is a programming error.  Please raise
+a bug report.
+</p></dd><dt><a name="CONFIG_GET_FAIL"></a><span class="term">CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</span></dt><dd><p>
 The configuration manager returned an error when this module requested
 the configuration. The full error message answer from the configuration
 manager is appended to the log error. The most likely cause is that
 the module is of a different (command specification) version than the
 running configuration manager.
-</p></dd><dt><a name="CONFIG_MANAGER_MOD_SPEC"></a><span class="term">CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</span></dt><dd><p>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
-</p></dd><dt><a name="CONFIG_MODULE_SPEC"></a><span class="term">CONFIG_MODULE_SPEC module specification error in %1: %2</span></dt><dd><p>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+</p></dd><dt><a name="CONFIG_GET_FAILED"></a><span class="term">CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</span></dt><dd><p>
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
+</p></dd><dt><a name="CONFIG_JSON_PARSE"></a><span class="term">CONFIG_JSON_PARSE JSON parse error in %1: %2</span></dt><dd><p>
+There was an error parsing the JSON file. The given file does not appear
+to be in valid JSON format. Please verify that the filename is correct
+and that the contents are valid JSON.
+</p></dd><dt><a name="CONFIG_LOG_CONFIG_ERRORS"></a><span class="term">CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</span></dt><dd><p>
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</p></dd><dt><a name="CONFIG_LOG_EXPLICIT"></a><span class="term">CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program.  The logging
+configuration for the program will be updated with the information.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_EXPLICIT"></a><span class="term">CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</span></dt><dd><p>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger.  As this does not match the logger specification for the
+program, it has been ignored.
+</p></dd><dt><a name="CONFIG_LOG_IGNORE_WILD"></a><span class="term">CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry.  The configuration is ignored.
+</p></dd><dt><a name="CONFIG_LOG_WILD_MATCH"></a><span class="term">CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</span></dt><dd><p>
+This is a debug message.  When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_FORMAT"></a><span class="term">CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</span></dt><dd><p>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
+</p></dd><dt><a name="CONFIG_MOD_SPEC_REJECT"></a><span class="term">CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</span></dt><dd><p>
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
+</p></dd><dt><a name="CONFIG_OPEN_FAIL"></a><span class="term">CONFIG_OPEN_FAIL error opening %1: %2</span></dt><dd><p>
+There was an error opening the given file. The reason for the failure
+is included in the message.
 </p></dd><dt><a name="DATASRC_CACHE_CREATE"></a><span class="term">DATASRC_CACHE_CREATE creating the hotspot cache</span></dt><dd><p>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
 </p></dd><dt><a name="DATASRC_CACHE_DESTROY"></a><span class="term">DATASRC_CACHE_DESTROY destroying the hotspot cache</span></dt><dd><p>
 Debug information. The hotspot cache is being destroyed.
-</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the cache</span></dt><dd><p>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
-</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the cache</span></dt><dd><p>
-The hotspot cache is enabled from now on.
-</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED the item '%1' is expired</span></dt><dd><p>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+</p></dd><dt><a name="DATASRC_CACHE_DISABLE"></a><span class="term">DATASRC_CACHE_DISABLE disabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is disabled.
+</p></dd><dt><a name="DATASRC_CACHE_ENABLE"></a><span class="term">DATASRC_CACHE_ENABLE enabling the hotspot cache</span></dt><dd><p>
+A debug message issued when the hotspot cache is enabled.
+</p></dd><dt><a name="DATASRC_CACHE_EXPIRED"></a><span class="term">DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</span></dt><dd><p>
+A debug message issued when a hotspot cache lookup located the item but it
+had expired.  The item was removed and the program proceeded as if the item
+had not been found.
 </p></dd><dt><a name="DATASRC_CACHE_FOUND"></a><span class="term">DATASRC_CACHE_FOUND the item '%1' was found</span></dt><dd><p>
-Debug information. An item was successfully looked up in the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL cache is full, dropping oldest</span></dt><dd><p>
+Debug information. An item was successfully located in the hotspot cache.
+</p></dd><dt><a name="DATASRC_CACHE_FULL"></a><span class="term">DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</span></dt><dd><p>
 Debug information. After inserting an item into the hotspot cache, the
 maximum number of items was exceeded, so the least recently used item will
 be dropped. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the cache</span></dt><dd><p>
-Debug information. It means a new item is being inserted into the hotspot
+</p></dd><dt><a name="DATASRC_CACHE_INSERT"></a><span class="term">DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</span></dt><dd><p>
+A debug message indicating that a new item is being inserted into the hotspot
 cache.
-</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found</span></dt><dd><p>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
-</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_NOT_FOUND"></a><span class="term">DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</span></dt><dd><p>
+A debug message issued when hotspot cache was searched for the specified
+item but it was not found.
+</p></dd><dt><a name="DATASRC_CACHE_OLD_FOUND"></a><span class="term">DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</span></dt><dd><p>
 Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
-</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the cache</span></dt><dd><p>
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
+</p></dd><dt><a name="DATASRC_CACHE_REMOVE"></a><span class="term">DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</span></dt><dd><p>
 Debug information. An item is being removed from the hotspot cache.
-</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_CACHE_SLOTS"></a><span class="term">DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</span></dt><dd><p>
 The maximum allowed number of items of the hotspot cache is set to the given
 number. If there are too many, some of them will be dropped. The size of 0
 means no limit.
+</p></dd><dt><a name="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED"></a><span class="term">DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</span></dt><dd><p>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION_EXACT"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</span></dt><dd><p>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_DNAME"></a><span class="term">DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</span></dt><dd><p>
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL"></a><span class="term">DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</span></dt><dd><p>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXRRSET"></a><span class="term">DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_RRSET"></a><span class="term">DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</span></dt><dd><p>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE"></a><span class="term">DATASRC_DATABASE_ITERATE iterating zone %1</span></dt><dd><p>
+The program is reading the whole zone, i.e. not searching for data, but going
+through each of the RRsets there.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_END"></a><span class="term">DATASRC_DATABASE_ITERATE_END iterating zone finished</span></dt><dd><p>
+While iterating through the zone, the program reached end of the data.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_NEXT"></a><span class="term">DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</span></dt><dd><p>
+While iterating through the zone, the program extracted next RRset from it.
+The name and RRtype of the RRset is indicated in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</span></dt><dd><p>
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_END"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_NEXT"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</span></dt><dd><p>
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_START"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADR_BADDATA"></a><span class="term">DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</span></dt><dd><p>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_COMMIT"></a><span class="term">DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_CREATED"></a><span class="term">DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_DESTROYED"></a><span class="term">DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACK"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also indicate a bug in the application that forgot to
+commit the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation.  In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD"></a><span class="term">DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</span></dt><dd><p>
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_NS"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</span></dt><dd><p>
+The database was queried to provide glue data and it didn't find a direct match.
+It could create it from given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_SUB"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</span></dt><dd><p>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_EMPTY"></a><span class="term">DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</span></dt><dd><p>
+The given wildcard exists implicitly in the domain space, as an empty nonterminal
+(e.g. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
 </p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
 </p></dd><dt><a name="DATASRC_MEM_ADD_RRSET"></a><span class="term">DATASRC_MEM_ADD_RRSET adding RRset '%1/%2' into zone '%3'</span></dt><dd><p>
 Debug information. An RRset is being added to the in-memory data source.
 </p></dd><dt><a name="DATASRC_MEM_ADD_WILDCARD"></a><span class="term">DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</span></dt><dd><p>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
 </p></dd><dt><a name="DATASRC_MEM_ADD_ZONE"></a><span class="term">DATASRC_MEM_ADD_ZONE adding zone '%1/%2'</span></dt><dd><p>
 Debug information. A zone is being added into the in-memory data source.
 </p></dd><dt><a name="DATASRC_MEM_ANY_SUCCESS"></a><span class="term">DATASRC_MEM_ANY_SUCCESS ANY query for '%1' successful</span></dt><dd><p>
@@ -146,7 +967,7 @@ Debug information. The requested domain is an alias to a different domain,
 returning the CNAME instead.
 </p></dd><dt><a name="DATASRC_MEM_CNAME_COEXIST"></a><span class="term">DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</span></dt><dd><p>
 This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
 </p></dd><dt><a name="DATASRC_MEM_CNAME_TO_NONEMPTY"></a><span class="term">DATASRC_MEM_CNAME_TO_NONEMPTY can't add CNAME to domain with other data in '%1'</span></dt><dd><p>
 Someone or something tried to add a CNAME into a domain that already contains
 some other data. But the protocol forbids coexistence of CNAME with anything
@@ -164,10 +985,10 @@ encountered on the way.  This may lead to redirection to a different domain and
 stop the search.
 </p></dd><dt><a name="DATASRC_MEM_DNAME_FOUND"></a><span class="term">DATASRC_MEM_DNAME_FOUND DNAME found at '%1'</span></dt><dd><p>
 Debug information. A DNAME was found instead of the requested information.
-</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+</p></dd><dt><a name="DATASRC_MEM_DNAME_NS"></a><span class="term">DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</span></dt><dd><p>
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
 </p></dd><dt><a name="DATASRC_MEM_DOMAIN_EMPTY"></a><span class="term">DATASRC_MEM_DOMAIN_EMPTY requested domain '%1' is empty</span></dt><dd><p>
 Debug information. The requested domain exists in the tree of domains, but
 it is empty. Therefore it doesn't contain the requested resource type.
@@ -186,7 +1007,7 @@ Debug information. A zone object for this zone is being searched for in the
 in-memory data source.
 </p></dd><dt><a name="DATASRC_MEM_LOAD"></a><span class="term">DATASRC_MEM_LOAD loading zone '%1' from file '%2'</span></dt><dd><p>
 Debug information. The content of master file is being loaded into the memory.
-</p></dd><dt><a name="DATASRC_MEM_NOTFOUND"></a><span class="term">DATASRC_MEM_NOTFOUND requested domain '%1' not found</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_NOT_FOUND"></a><span class="term">DATASRC_MEM_NOT_FOUND requested domain '%1' not found</span></dt><dd><p>
 Debug information. The requested domain does not exist.
 </p></dd><dt><a name="DATASRC_MEM_NS_ENCOUNTERED"></a><span class="term">DATASRC_MEM_NS_ENCOUNTERED encountered a NS</span></dt><dd><p>
 Debug information. While searching for the requested domain, a NS was
@@ -222,21 +1043,21 @@ destroyed.
 Debug information. A domain above wildcard was reached, but there's something
 below the requested domain. Therefore the wildcard doesn't apply here.  This
 behaviour is specified by RFC 1034, section 4.3.3
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_DNAME"></a><span class="term">DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</span></dt><dd><p>
 The software refuses to load DNAME records into a wildcard domain.  It isn't
 explicitly forbidden, but the protocol is ambiguous about how this should
 behave and BIND 9 refuses that as well. Please describe your intention using
 different tools.
-</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_MEM_WILDCARD_NS"></a><span class="term">DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</span></dt><dd><p>
 The software refuses to load NS records into a wildcard domain.  It isn't
 explicitly forbidden, but the protocol is ambiguous about how this should
 behave and BIND 9 refuses that as well. Please describe your intention using
 different tools.
 </p></dd><dt><a name="DATASRC_META_ADD"></a><span class="term">DATASRC_META_ADD adding a data source into meta data source</span></dt><dd><p>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
 </p></dd><dt><a name="DATASRC_META_ADD_CLASS_MISMATCH"></a><span class="term">DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</span></dt><dd><p>
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
 classes do not match.
 </p></dd><dt><a name="DATASRC_META_REMOVE"></a><span class="term">DATASRC_META_REMOVE removing data source from meta data source</span></dt><dd><p>
 Debug information. A data source is being removed from meta data source.
@@ -257,10 +1078,10 @@ specific error already.
 </p></dd><dt><a name="DATASRC_QUERY_BAD_REFERRAL"></a><span class="term">DATASRC_QUERY_BAD_REFERRAL bad referral to '%1'</span></dt><dd><p>
 The domain lives in another zone. But it is not possible to generate referral
 information for it.
-</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in cache</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CACHED"></a><span class="term">DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</span></dt><dd><p>
 Debug information. The requested data were found in the hotspot cache, so
 no query is sent to the real data source.
-</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_CHECK_CACHE"></a><span class="term">DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</span></dt><dd><p>
 Debug information. While processing a query, lookup to the hotspot cache
 is being made.
 </p></dd><dt><a name="DATASRC_QUERY_COPY_AUTH"></a><span class="term">DATASRC_QUERY_COPY_AUTH copying authoritative section into message</span></dt><dd><p>
@@ -269,20 +1090,19 @@ response message.
 </p></dd><dt><a name="DATASRC_QUERY_DELEGATION"></a><span class="term">DATASRC_QUERY_DELEGATION looking for delegation on the path to '%1'</span></dt><dd><p>
 Debug information. The software is trying to identify delegation points on the
 way down to the given domain.
-</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</span></dt><dd><p>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+</p></dd><dt><a name="DATASRC_QUERY_EMPTY_CNAME"></a><span class="term">DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</span></dt><dd><p>
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
 </p></dd><dt><a name="DATASRC_QUERY_EMPTY_DNAME"></a><span class="term">DATASRC_QUERY_EMPTY_DNAME the DNAME on '%1' is empty</span></dt><dd><p>
 During an attempt to synthesize CNAME from this DNAME it was discovered the
 DNAME is empty (it has no records). This indicates problem with supplied data.
 </p></dd><dt><a name="DATASRC_QUERY_FAIL"></a><span class="term">DATASRC_QUERY_FAIL query failed</span></dt><dd><p>
 Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
 </p></dd><dt><a name="DATASRC_QUERY_FOLLOW_CNAME"></a><span class="term">DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</span></dt><dd><p>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
 </p></dd><dt><a name="DATASRC_QUERY_GET_MX_ADDITIONAL"></a><span class="term">DATASRC_QUERY_GET_MX_ADDITIONAL addition of A/AAAA for '%1' requested by MX '%2'</span></dt><dd><p>
 Debug information. While processing a query, a MX record was met. It
 references the mentioned address, so A/AAAA records for it are looked up
@@ -301,12 +1121,12 @@ operation code.
 </p></dd><dt><a name="DATASRC_QUERY_IS_AUTH"></a><span class="term">DATASRC_QUERY_IS_AUTH auth query (%1/%2)</span></dt><dd><p>
 Debug information. The last DO_QUERY is an auth query.
 </p></dd><dt><a name="DATASRC_QUERY_IS_GLUE"></a><span class="term">DATASRC_QUERY_IS_GLUE glue query (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
 </p></dd><dt><a name="DATASRC_QUERY_IS_NOGLUE"></a><span class="term">DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
 glue.
 </p></dd><dt><a name="DATASRC_QUERY_IS_REF"></a><span class="term">DATASRC_QUERY_IS_REF query for referral (%1/%2)</span></dt><dd><p>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
 </p></dd><dt><a name="DATASRC_QUERY_IS_SIMPLE"></a><span class="term">DATASRC_QUERY_IS_SIMPLE simple query (%1/%2)</span></dt><dd><p>
 Debug information. The last DO_QUERY is a simple query.
 </p></dd><dt><a name="DATASRC_QUERY_MISPLACED_TASK"></a><span class="term">DATASRC_QUERY_MISPLACED_TASK task of this type should not be here</span></dt><dd><p>
@@ -324,10 +1144,10 @@ does not have one. This indicates problem with provided data.
 The underlying data source failed to answer the no-glue query. 1 means some
 error, 2 is not implemented. The data source should have logged the specific
 error already.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_AUTH"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
 Debug information. The hotspot cache is ignored for authoritative ANY queries
 for consistency reasons.
-</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE"></a><span class="term">DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</span></dt><dd><p>
 Debug information. The hotspot cache is ignored for ANY queries for consistency
 reasons.
 </p></dd><dt><a name="DATASRC_QUERY_NO_DS_NSEC"></a><span class="term">DATASRC_QUERY_NO_DS_NSEC there's no DS record in the '%1' zone</span></dt><dd><p>
@@ -341,7 +1161,7 @@ Lookup of domain failed because the data have no zone that contain the
 domain. Maybe someone sent a query to the wrong server for some reason.
 </p></dd><dt><a name="DATASRC_QUERY_PROCESS"></a><span class="term">DATASRC_QUERY_PROCESS processing query '%1/%2' in the '%3' class</span></dt><dd><p>
 Debug information. A sure query is being processed now.
-</p></dd><dt><a name="DATASRC_QUERY_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</span></dt><dd><p>
 The user wants DNSSEC and we discovered the entity doesn't exist (either
 domain or the record). But there was an error getting NSEC/NSEC3 record
 to prove the nonexistence.
@@ -357,13 +1177,13 @@ The underlying data source failed to answer the simple query. 1 means some
 error, 2 is not implemented. The data source should have logged the specific
 error already.
 </p></dd><dt><a name="DATASRC_QUERY_SYNTH_CNAME"></a><span class="term">DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</span></dt><dd><p>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
 </p></dd><dt><a name="DATASRC_QUERY_TASK_FAIL"></a><span class="term">DATASRC_QUERY_TASK_FAIL task failed with %1</span></dt><dd><p>
 The query subtask failed. The reason should have been reported by the subtask
 already. The code is 1 for error, 2 for not implemented.
-</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_TOO_MANY_CNAMES"></a><span class="term">DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</span></dt><dd><p>
 A CNAME led to another CNAME and it led to another, and so on. After 16
 CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
 might possibly be a loop as well. Note that some of the CNAMEs might have
@@ -377,7 +1197,7 @@ domain is being looked for now.
 </p></dd><dt><a name="DATASRC_QUERY_WILDCARD_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_FAIL error processing wildcard for '%1'</span></dt><dd><p>
 During an attempt to cover the domain by a wildcard an error happened. The
 exact kind was hopefully already reported.
-</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVENX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL"></a><span class="term">DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</span></dt><dd><p>
 While processing a wildcard, it wasn't possible to prove nonexistence of the
 given domain or record.  The code is 1 for error and 2 for not implemented.
 </p></dd><dt><a name="DATASRC_QUERY_WILDCARD_REFERRAL"></a><span class="term">DATASRC_QUERY_WILDCARD_REFERRAL unable to find referral info for '%1' (%2)</span></dt><dd><p>
@@ -385,15 +1205,21 @@ While processing a wildcard, a referral was met. But it wasn't possible to get
 enough information for it.  The code is 1 for error, 2 for not implemented.
 </p></dd><dt><a name="DATASRC_SQLITE_CLOSE"></a><span class="term">DATASRC_SQLITE_CLOSE closing SQLite database</span></dt><dd><p>
 Debug information. The SQLite data source is closing the database file.
-</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE sQLite data source created</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_CONNCLOSE"></a><span class="term">DATASRC_SQLITE_CONNCLOSE Closing sqlite database</span></dt><dd><p>
+The database file is no longer needed and is being closed.
+</p></dd><dt><a name="DATASRC_SQLITE_CONNOPEN"></a><span class="term">DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</span></dt><dd><p>
+The database file is being opened so it can start providing data.
+</p></dd><dt><a name="DATASRC_SQLITE_CREATE"></a><span class="term">DATASRC_SQLITE_CREATE SQLite data source created</span></dt><dd><p>
 Debug information. An instance of SQLite data source is being created.
-</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY sQLite data source destroyed</span></dt><dd><p>
+</p></dd><dt><a name="DATASRC_SQLITE_DESTROY"></a><span class="term">DATASRC_SQLITE_DESTROY SQLite data source destroyed</span></dt><dd><p>
 Debug information. An instance of SQLite data source is being destroyed.
+</p></dd><dt><a name="DATASRC_SQLITE_DROPCONN"></a><span class="term">DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</span></dt><dd><p>
+The object around a database connection is being destroyed.
 </p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE"></a><span class="term">DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</span></dt><dd><p>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
 should hold this domain.
-</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOTFOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</span></dt><dd><p>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+</p></dd><dt><a name="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND"></a><span class="term">DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</span></dt><dd><p>
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
 no such zone in our data.
 </p></dd><dt><a name="DATASRC_SQLITE_FIND"></a><span class="term">DATASRC_SQLITE_FIND looking for RRset '%1/%2'</span></dt><dd><p>
 Debug information. The SQLite data source is looking up a resource record
@@ -417,7 +1243,7 @@ and type in the database.
 Debug information. The SQLite data source is identifying if this domain is
 a referral and where it goes.
 </p></dd><dt><a name="DATASRC_SQLITE_FINDREF_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</span></dt><dd><p>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
 it contains different class than the query was for.
 </p></dd><dt><a name="DATASRC_SQLITE_FIND_BAD_CLASS"></a><span class="term">DATASRC_SQLITE_FIND_BAD_CLASS class mismatch looking for an RRset ('%1' and '%2')</span></dt><dd><p>
 The SQLite data source was looking up an RRset, but the data source contains
@@ -428,21 +1254,30 @@ source.
 </p></dd><dt><a name="DATASRC_SQLITE_FIND_NSEC3_NO_ZONE"></a><span class="term">DATASRC_SQLITE_FIND_NSEC3_NO_ZONE no such zone '%1'</span></dt><dd><p>
 The SQLite data source was asked to provide a NSEC3 record for given zone.
 But it doesn't contain that zone.
+</p></dd><dt><a name="DATASRC_SQLITE_NEWCONN"></a><span class="term">DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</span></dt><dd><p>
+A wrapper object to hold database connection is being initialized.
 </p></dd><dt><a name="DATASRC_SQLITE_OPEN"></a><span class="term">DATASRC_SQLITE_OPEN opening SQLite database '%1'</span></dt><dd><p>
 Debug information. The SQLite data source is loading an SQLite database in
 the provided file.
 </p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS"></a><span class="term">DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</span></dt><dd><p>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message.  The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
 </p></dd><dt><a name="DATASRC_SQLITE_PREVIOUS_NO_ZONE"></a><span class="term">DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</span></dt><dd><p>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
 </p></dd><dt><a name="DATASRC_SQLITE_SETUP"></a><span class="term">DATASRC_SQLITE_SETUP setting up SQLite database</span></dt><dd><p>
 The database for SQLite data source was found empty. It is assumed this is the
 first run and it is being initialized with current schema.  It'll still contain
 no data, but it will be ready for use.
-</p></dd><dt><a name="DATASRC_STATIC_BAD_CLASS"></a><span class="term">DATASRC_STATIC_BAD_CLASS static data source can handle CH only</span></dt><dd><p>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+</p></dd><dt><a name="DATASRC_STATIC_CLASS_NOT_CH"></a><span class="term">DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</span></dt><dd><p>
+An error message indicating that a query requesting an RR for a class other
+than CH was sent to the static data source (which only handles CH queries).
 </p></dd><dt><a name="DATASRC_STATIC_CREATE"></a><span class="term">DATASRC_STATIC_CREATE creating the static datasource</span></dt><dd><p>
 Debug information. The static data source (the one holding stuff like
 version.bind) is being created.
@@ -452,142 +1287,259 @@ data source.
 </p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
 This indicates a programming error. An internal task of unknown type was
 generated.
-</p></dd><dt><a name="LOGIMPL_ABOVEDBGMAX"></a><span class="term">LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
-</p></dd><dt><a name="LOGIMPL_BADDEBUG"></a><span class="term">LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</span></dt><dd><p>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form.  In particular,
-it starts DEBUG but does not end with an integer.
-</p></dd><dt><a name="LOGIMPL_BELOWDBGMIN"></a><span class="term">LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
-</p></dd><dt><a name="MSG_BADDESTINATION"></a><span class="term">MSG_BADDESTINATION unrecognized log destination: %1</span></dt><dd><p>
+</p></dd><dt><a name="LIBXFRIN_DIFFERENT_TTL"></a><span class="term">LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</span></dt><dd><p>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten by the earlier one in the sequence.
+</p></dd><dt><a name="LIBXFRIN_NO_JOURNAL"></a><span class="term">LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</span></dt><dd><p>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value.  The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BAD_DEBUG_STRING"></a><span class="term">LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</span></dt><dd><p>
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22).  The appearance of this message indicates
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOGIMPL_BELOW_MIN_DEBUG"></a><span class="term">LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</span></dt><dd><p>
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value.  The appearance of this message may indicate
+a programming error - please submit a bug report.
+</p></dd><dt><a name="LOG_BAD_DESTINATION"></a><span class="term">LOG_BAD_DESTINATION unrecognized log destination: %1</span></dt><dd><p>
 A logger destination value was given that was not recognized. The
 destination should be one of "console", "file", or "syslog".
-</p></dd><dt><a name="MSG_BADSEVERITY"></a><span class="term">MSG_BADSEVERITY unrecognized log severity: %1</span></dt><dd><p>
+</p></dd><dt><a name="LOG_BAD_SEVERITY"></a><span class="term">LOG_BAD_SEVERITY unrecognized log severity: %1</span></dt><dd><p>
 A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
-</p></dd><dt><a name="MSG_BADSTREAM"></a><span class="term">MSG_BADSTREAM bad log console output stream: %1</span></dt><dd><p>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
-</p></dd><dt><a name="MSG_DUPLNS"></a><span class="term">MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
-When reading a message file, more than one $NAMESPACE directive was found.  In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
-</p></dd><dt><a name="MSG_DUPMSGID"></a><span class="term">MSG_DUPMSGID duplicate message ID (%1) in compiled code</span></dt><dd><p>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules.  (All message
-IDs should be unique throughout BIND10.)  This has no impact on the operation
-of the server other that erroneous messages may be logged.  (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded.  However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
-</p></dd><dt><a name="MSG_IDNOTFND"></a><span class="term">MSG_IDNOTFND could not replace message text for '%1': no such message</span></dt><dd><p>
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</p></dd><dt><a name="LOG_BAD_STREAM"></a><span class="term">LOG_BAD_STREAM bad log console output stream: %1</span></dt><dd><p>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+</p></dd><dt><a name="LOG_DUPLICATE_MESSAGE_ID"></a><span class="term">LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</span></dt><dd><p>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code.  This indicates a
+programming error; please submit a bug report.
+</p></dd><dt><a name="LOG_DUPLICATE_NAMESPACE"></a><span class="term">LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</span></dt><dd><p>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.)  Such a condition is regarded as an
+error and the read will be abandoned.
+</p></dd><dt><a name="LOG_INPUT_OPEN_FAIL"></a><span class="term">LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</span></dt><dd><p>
+The program was not able to open the specified input message file for
+the reason given.
+</p></dd><dt><a name="LOG_INVALID_MESSAGE_ID"></a><span class="term">LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</span></dt><dd><p>
+An invalid message identification (ID) has been found during the read of
+a message file.  Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</p></dd><dt><a name="LOG_NAMESPACE_EXTRA_ARGS"></a><span class="term">LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed.  This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
+</p></dd><dt><a name="LOG_NAMESPACE_INVALID_ARG"></a><span class="term">LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
+</p></dd><dt><a name="LOG_NAMESPACE_NO_ARGS"></a><span class="term">LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
+</p></dd><dt><a name="LOG_NO_MESSAGE_ID"></a><span class="term">LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message.  This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
+</p></dd><dt><a name="LOG_NO_MESSAGE_TEXT"></a><span class="term">LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message.  This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</p></dd><dt><a name="LOG_NO_SUCH_MESSAGE"></a><span class="term">LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</span></dt><dd><p>
 During start-up a local message file was read.  A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary.  Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary.  This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</p><p>
+There may be several reasons why this message may appear:
+</p><p>
+- The message ID has been mis-spelled in the local message file.
+</p><p>
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</p><p>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</p><p>
+Whatever the reason, there is no impact on the operation of BIND 10.
+</p></dd><dt><a name="LOG_OPEN_OUTPUT_FAIL"></a><span class="term">LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</span></dt><dd><p>
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
+</p></dd><dt><a name="LOG_PREFIX_EXTRA_ARGS"></a><span class="term">LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</p><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_PREFIX_INVALID_ARG"></a><span class="term">LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit).  A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
 </p><p>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
-</p></dd><dt><a name="MSG_INVMSGID"></a><span class="term">MSG_INVMSGID line %1: invalid message identification '%2'</span></dt><dd><p>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
-</p></dd><dt><a name="MSG_NOMSGID"></a><span class="term">MSG_NOMSGID line %1: message definition line found without a message ID</span></dt><dd><p>
-Message definition lines are lines starting with a "%".  The rest of the line
-should comprise the message ID and text describing the message.  This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
-</p></dd><dt><a name="MSG_NOMSGTXT"></a><span class="term">MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</span></dt><dd><p>
-Message definition lines are lines starting with a "%".  The rest of the line
-should comprise the message ID and text describing the message.  This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
-</p></dd><dt><a name="MSG_NSEXTRARG"></a><span class="term">MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed.  This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
-</p></dd><dt><a name="MSG_NSINVARG"></a><span class="term">MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</span></dt><dd><p>
-The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct.  The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
-</p></dd><dt><a name="MSG_NSNOARG"></a><span class="term">MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</span></dt><dd><p>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed.  This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
-</p></dd><dt><a name="MSG_OPENIN"></a><span class="term">MSG_OPENIN unable to open message file %1 for input: %2</span></dt><dd><p>
-The program was not able to open the specified input message file for the
-reason given.
-</p></dd><dt><a name="MSG_OPENOUT"></a><span class="term">MSG_OPENOUT unable to open %1 for output: %2</span></dt><dd><p>
-The program was not able to open the specified output file for the reason
-given.
-</p></dd><dt><a name="MSG_PRFEXTRARG"></a><span class="term">MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</span></dt><dd><p>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created.  This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
-</p></dd><dt><a name="MSG_PRFINVARG"></a><span class="term">MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</span></dt><dd><p>
-The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
-</p></dd><dt><a name="MSG_RDLOCMES"></a><span class="term">MSG_RDLOCMES reading local message file %1</span></dt><dd><p>
-This is an informational message output by BIND10 when it starts to read a
-local message file.  (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
-</p></dd><dt><a name="MSG_READERR"></a><span class="term">MSG_READERR error reading from message file %1: %2</span></dt><dd><p>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
+</p></dd><dt><a name="LOG_READING_LOCAL_FILE"></a><span class="term">LOG_READING_LOCAL_FILE reading local message file %1</span></dt><dd><p>
+This is an informational message output by BIND 10 when it starts to read
+a local message file.  (A local message file may replace the text of
+one of more messages; the ID of the message will not be changed though.)
+</p></dd><dt><a name="LOG_READ_ERROR"></a><span class="term">LOG_READ_ERROR error reading from message file %1: %2</span></dt><dd><p>
 The specified error was encountered reading from the named message file.
-</p></dd><dt><a name="MSG_UNRECDIR"></a><span class="term">MSG_UNRECDIR line %1: unrecognised directive '%2'</span></dt><dd><p>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
-</p></dd><dt><a name="MSG_WRITERR"></a><span class="term">MSG_WRITERR error writing to %1: %2</span></dt><dd><p>
-The specified error was encountered by the message compiler when writing to
-the named output file.
-</p></dd><dt><a name="NSAS_INVRESPSTR"></a><span class="term">NSAS_INVRESPSTR queried for %1 but got invalid response</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver.  The NSAS made a query for a RR for the
-specified nameserver but received an invalid response.  Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
-</p></dd><dt><a name="NSAS_INVRESPTC"></a><span class="term">NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver.  The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
-</p></dd><dt><a name="NSAS_LOOKUPCANCEL"></a><span class="term">NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</span></dt><dd><p>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
-</p></dd><dt><a name="NSAS_LOOKUPZONE"></a><span class="term">NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</span></dt><dd><p>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
-</p></dd><dt><a name="NSAS_NSADDR"></a><span class="term">NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
-</p></dd><dt><a name="NSAS_NSLKUPFAIL"></a><span class="term">NSAS_NSLKUPFAIL failed to lookup any %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver.  This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
-</p></dd><dt><a name="NSAS_NSLKUPSUCC"></a><span class="term">NSAS_NSLKUPSUCC found address %1 for %2</span></dt><dd><p>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
-</p></dd><dt><a name="NSAS_SETRTT"></a><span class="term">NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</span></dt><dd><p>
+</p></dd><dt><a name="LOG_UNRECOGNISED_DIRECTIVE"></a><span class="term">LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</span></dt><dd><p>
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
+</p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
+The specified error was encountered by the message compiler when writing
+to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ACCESS_FAILURE"></a><span class="term">NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</span></dt><dd><p>
+notify_out failed to get access to one of configured data sources.
+Detailed error is shown in the log message.  This can be either a
+configuration error or installation setup failure.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND"></a><span class="term">NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</span></dt><dd><p>
+notify_out attempted to get slave information of a zone but the zone
+isn't found in the expected data source.  This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process.  So this means some critical inconsistency in the data
+source or a software bug.
+</p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_OPCODE"></a><span class="term">NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QID"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_BAD_QUERY_NAME"></a><span class="term">NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_QR_NOT_SET"></a><span class="term">NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</span></dt><dd><p>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</p></dd><dt><a name="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION"></a><span class="term">NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</span></dt><dd><p>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</p></dd><dt><a name="NOTIFY_OUT_RETRY_EXCEEDED"></a><span class="term">NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</span></dt><dd><p>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</p></dd><dt><a name="NOTIFY_OUT_SENDING_NOTIFY"></a><span class="term">NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</span></dt><dd><p>
+A notify message is sent to the secondary nameserver at the given
+address.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_SOCKET_RECV_ERROR"></a><span class="term">NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</span></dt><dd><p>
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
+</p></dd><dt><a name="NOTIFY_OUT_TIMEOUT"></a><span class="term">NOTIFY_OUT_TIMEOUT retry notify to %1#%2</span></dt><dd><p>
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_BAD_SOA"></a><span class="term">NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  Notify message won't
+be sent to such a zone.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_NO_NS"></a><span class="term">NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  Notify message won't be sent to such a zone.
+</p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
+</p></dd><dt><a name="NSAS_FOUND_ADDRESS"></a><span class="term">NSAS_FOUND_ADDRESS found address %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
+</p></dd><dt><a name="NSAS_INVALID_RESPONSE"></a><span class="term">NSAS_INVALID_RESPONSE queried for %1 but got invalid response</span></dt><dd><p>
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</p><p>
+This message indicates an internal error in the NSAS.  Please raise a
+bug report.
+</p></dd><dt><a name="NSAS_LOOKUP_CANCEL"></a><span class="term">NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</span></dt><dd><p>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
+</p></dd><dt><a name="NSAS_NS_LOOKUP_FAIL"></a><span class="term">NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</span></dt><dd><p>
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver.  This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
+</p></dd><dt><a name="NSAS_SEARCH_ZONE_NS"></a><span class="term">NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</span></dt><dd><p>
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
+</p></dd><dt><a name="NSAS_UPDATE_RTT"></a><span class="term">NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</span></dt><dd><p>
 A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver.  The RTT has been updated using the value given and the new RTT is
-displayed.  (The RTT is subject to a calculation that damps out sudden
-changes.  As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver.  The RTT has been updated using the value given
+and the new RTT is displayed.  (The RTT is subject to a calculation that
+damps out sudden changes.  As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</p></dd><dt><a name="NSAS_WRONG_ANSWER"></a><span class="term">NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</span></dt><dd><p>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class (both shown in the message).
+</p><p>
+This message indicates an internal error in the NSAS.  Please raise a
+bug report.
 </p></dd><dt><a name="RESLIB_ANSWER"></a><span class="term">RESLIB_ANSWER answer received in response to query for <%1></span></dt><dd><p>
 A debug message recording that an answer has been received to an upstream
 query for the specified question.  Previous debug messages will have indicated
@@ -599,95 +1551,95 @@ the server to which the question was sent.
 </p></dd><dt><a name="RESLIB_DEEPEST"></a><span class="term">RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2</span></dt><dd><p>
 A debug message, a cache lookup did not find the specified <name, class,
 type> tuple in the cache; instead, the deepest delegation found is indicated.
-</p></dd><dt><a name="RESLIB_FOLLOWCNAME"></a><span class="term">RESLIB_FOLLOWCNAME following CNAME chain to <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_FOLLOW_CNAME"></a><span class="term">RESLIB_FOLLOW_CNAME following CNAME chain to <%1></span></dt><dd><p>
 A debug message, a CNAME response was received and another query is being issued
 for the <name, class, type> tuple.
-</p></dd><dt><a name="RESLIB_LONGCHAIN"></a><span class="term">RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_LONG_CHAIN"></a><span class="term">RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</span></dt><dd><p>
 A debug message recording that a CNAME response has been received to an upstream
 query for the specified question (Previous debug messages will have indicated
 the server to which the question was sent).  However, receipt of this CNAME
 has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
is where one CNAME points to another) and so an error is being returned.
-</p></dd><dt><a name="RESLIB_NONSRRSET"></a><span class="term">RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NO_NS_RRSET"></a><span class="term">RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></span></dt><dd><p>
 A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral.  However, the received message did
+query and was categorized as a referral.  However, the received message did
 not contain any NS RRsets.  This may indicate a programming error in the
 response classification code.
-</p></dd><dt><a name="RESLIB_NSASLOOK"></a><span class="term">RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NSAS_LOOKUP"></a><span class="term">RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</span></dt><dd><p>
 A debug message, the RunningQuery object is querying the NSAS for the
 nameservers for the specified zone.
-</p></dd><dt><a name="RESLIB_NXDOMRR"></a><span class="term">RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_NXDOM_NXRR"></a><span class="term">RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></span></dt><dd><p>
 A debug message recording that either a NXDOMAIN or an NXRRSET response has
 been received to an upstream query for the specified question.  Previous debug
 messages will have indicated the server to which the question was sent.
 </p></dd><dt><a name="RESLIB_PROTOCOL"></a><span class="term">RESLIB_PROTOCOL protocol error in answer for %1:  %3</span></dt><dd><p>
 A debug message indicating that a protocol error was received.  As there
 are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_PROTOCOLRTRY"></a><span class="term">RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_PROTOCOL_RETRY"></a><span class="term">RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</span></dt><dd><p>
 A debug message indicating that a protocol error was received and that
 the resolver is repeating the query to the same nameserver.  After this
 repeated query, there will be the indicated number of retries left.
-</p></dd><dt><a name="RESLIB_RCODERR"></a><span class="term">RESLIB_RCODERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RCODE_ERR"></a><span class="term">RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></span></dt><dd><p>
 A debug message, the response to the specified query indicated an error
 that is not covered by a specific code path.  A SERVFAIL will be returned.
-</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question.  Previous debug messages will
-have indicated the server to which the question was sent.
-</p></dd><dt><a name="RESLIB_REFERZONE"></a><span class="term">RESLIB_REFERZONE referred to zone %1</span></dt><dd><p>
-A debug message indicating that the last referral message was to the specified
-zone.
-</p></dd><dt><a name="RESLIB_RESCAFND"></a><span class="term">RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_FIND"></a><span class="term">RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</span></dt><dd><p>
 This is a debug message and indicates that a RecursiveQuery object found
 the specified <name, class, type> tuple in the cache.  The instance number
 at the end of the message indicates which of the two resolve() methods has
 been called.
-</p></dd><dt><a name="RESLIB_RESCANOTFND"></a><span class="term">RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RECQ_CACHE_NO_FIND"></a><span class="term">RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</span></dt><dd><p>
 This is a debug message and indicates that the cache lookup made by the
 RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
 object has been created to resolve the question.  The instance number at
 the end of the message indicates which of the two resolve() methods has
 been called.
+</p></dd><dt><a name="RESLIB_REFERRAL"></a><span class="term">RESLIB_REFERRAL referral received in response to query for <%1></span></dt><dd><p>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</p></dd><dt><a name="RESLIB_REFER_ZONE"></a><span class="term">RESLIB_REFER_ZONE referred to zone %1</span></dt><dd><p>
+A debug message indicating that the last referral message was to the specified
+zone.
 </p></dd><dt><a name="RESLIB_RESOLVE"></a><span class="term">RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</span></dt><dd><p>
 A debug message, the RecursiveQuery::resolve method has been called to resolve
 the specified <name, class, type> tuple.  The first action will be to lookup
 the specified tuple in the cache.  The instance number at the end of the
 message indicates which of the two resolve() methods has been called.
-</p></dd><dt><a name="RESLIB_RRSETFND"></a><span class="term">RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RRSET_FOUND"></a><span class="term">RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</span></dt><dd><p>
 A debug message, indicating that when RecursiveQuery::resolve queried the
 cache, a single RRset was found which was put in the answer.  The instance
 number at the end of the message indicates which of the two resolve()
 methods has been called.
 </p></dd><dt><a name="RESLIB_RTT"></a><span class="term">RESLIB_RTT round-trip time of last query calculated as %1 ms</span></dt><dd><p>
 A debug message giving the round-trip time of the last query and response.
-</p></dd><dt><a name="RESLIB_RUNCAFND"></a><span class="term">RESLIB_RUNCAFND found <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_FIND"></a><span class="term">RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</span></dt><dd><p>
 This is a debug message and indicates that a RunningQuery object found
 the specified <name, class, type> tuple in the cache.
-</p></dd><dt><a name="RESLIB_RUNCALOOK"></a><span class="term">RESLIB_RUNCALOOK looking up up <%1> in the cache</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_CACHE_LOOKUP"></a><span class="term">RESLIB_RUNQ_CACHE_LOOKUP looking up <%1> in the cache</span></dt><dd><p>
 This is a debug message and indicates that a RunningQuery object has made
 a call to its doLookup() method to look up the specified <name, class, type>
 tuple, the first action of which will be to examine the cache.
-</p></dd><dt><a name="RESLIB_RUNQUFAIL"></a><span class="term">RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_FAIL"></a><span class="term">RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</span></dt><dd><p>
 A debug message indicating that a RunningQuery's failure callback has been
 called because all nameservers for the zone in question are unreachable.
-</p></dd><dt><a name="RESLIB_RUNQUSUCC"></a><span class="term">RESLIB_RUNQUSUCC success callback - sending query to %1</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_RUNQ_SUCCESS"></a><span class="term">RESLIB_RUNQ_SUCCESS success callback - sending query to %1</span></dt><dd><p>
 A debug message indicating that a RunningQuery's success callback has been
 called because a nameserver has been found, and that a query is being sent
 to the specified nameserver.
-</p></dd><dt><a name="RESLIB_TESTSERV"></a><span class="term">RESLIB_TESTSERV setting test server to %1(%2)</span></dt><dd><p>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed.  As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
-</p></dd><dt><a name="RESLIB_TESTUPSTR"></a><span class="term">RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</span></dt><dd><p>
+</p></dd><dt><a name="RESLIB_TEST_SERVER"></a><span class="term">RESLIB_TEST_SERVER setting test server to %1(%2)</span></dt><dd><p>
+This is a warning message only generated in unit tests.  It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed.  If seen during normal operation,
+please submit a bug report.
+</p></dd><dt><a name="RESLIB_TEST_UPSTREAM"></a><span class="term">RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</span></dt><dd><p>
 This is a debug message and should only be seen in unit tests.  A query for
 the specified <name, class, type> tuple is being sent to a test nameserver
 whose address is given in the message.
 </p></dd><dt><a name="RESLIB_TIMEOUT"></a><span class="term">RESLIB_TIMEOUT query <%1> to %2 timed out</span></dt><dd><p>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
-</p></dd><dt><a name="RESLIB_TIMEOUTRTRY"></a><span class="term">RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
+</p></dd><dt><a name="RESLIB_TIMEOUT_RETRY"></a><span class="term">RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</span></dt><dd><p>
 A debug message indicating that the specified query has timed out and that
 the resolver is repeating the query to the same nameserver.  After this
 repeated query, there will be the indicated number of retries left.
@@ -699,143 +1651,725 @@ gives no cause for concern.
 </p></dd><dt><a name="RESLIB_UPSTREAM"></a><span class="term">RESLIB_UPSTREAM sending upstream query for <%1> to %2</span></dt><dd><p>
 A debug message indicating that a query for the specified <name, class, type>
 tuple is being sent to a nameserver whose address is given in the message.
-</p></dd><dt><a name="RESOLVER_AXFRTCP"></a><span class="term">RESOLVER_AXFRTCP AXFR request received over TCP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over TCP.  The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_AXFRUDP"></a><span class="term">RESOLVER_AXFRUDP AXFR request received over UDP</span></dt><dd><p>
-A debug message, the resolver received a NOTIFY message over UDP.  The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_CLTMOSMALL"></a><span class="term">RESOLVER_CLTMOSMALL client timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_CONFIGCHAN"></a><span class="term">RESOLVER_CONFIGCHAN configuration channel created</span></dt><dd><p>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
-</p></dd><dt><a name="RESOLVER_CONFIGERR"></a><span class="term">RESOLVER_CONFIGERR error in configuration: %1</span></dt><dd><p>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error).  The reason for the error, given as a parameter in the message,
-will give more details.
-</p></dd><dt><a name="RESOLVER_CONFIGLOAD"></a><span class="term">RESOLVER_CONFIGLOAD configuration loaded</span></dt><dd><p>
-A debug message, output when the resolver configuration has been successfully
-loaded.
-</p></dd><dt><a name="RESOLVER_CONFIGUPD"></a><span class="term">RESOLVER_CONFIGUPD configuration updated: %1</span></dt><dd><p>
-A debug message, the configuration has been updated with the specified
-information.
+</p></dd><dt><a name="RESOLVER_AXFR_TCP"></a><span class="term">RESOLVER_AXFR_TCP AXFR request received over TCP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP.  Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_AXFR_UDP"></a><span class="term">RESOLVER_AXFR_UDP AXFR request received over UDP</span></dt><dd><p>
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP.  Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_CLIENT_TIME_SMALL"></a><span class="term">RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small.  The configuration
+update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_CHANNEL"></a><span class="term">RESOLVER_CONFIG_CHANNEL configuration channel created</span></dt><dd><p>
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
+</p></dd><dt><a name="RESOLVER_CONFIG_ERROR"></a><span class="term">RESOLVER_CONFIG_ERROR error in configuration: %1</span></dt><dd><p>
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error).  The reason for the error, included
+in the message, will give more details.  The configuration update is
+not applied and the resolver parameters were not changed.
+</p></dd><dt><a name="RESOLVER_CONFIG_LOADED"></a><span class="term">RESOLVER_CONFIG_LOADED configuration loaded</span></dt><dd><p>
+This is a debug message output when the resolver configuration has been
+successfully loaded.
+</p></dd><dt><a name="RESOLVER_CONFIG_UPDATED"></a><span class="term">RESOLVER_CONFIG_UPDATED configuration updated: %1</span></dt><dd><p>
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
 </p></dd><dt><a name="RESOLVER_CREATED"></a><span class="term">RESOLVER_CREATED main resolver object created</span></dt><dd><p>
-A debug message, output when the Resolver() object has been created.
-</p></dd><dt><a name="RESOLVER_DNSMSGRCVD"></a><span class="term">RESOLVER_DNSMSGRCVD DNS message received: %1</span></dt><dd><p>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
-</p></dd><dt><a name="RESOLVER_DNSMSGSENT"></a><span class="term">RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message indicating that the main resolver object has
+been created.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_RECEIVED"></a><span class="term">RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</span></dt><dd><p>
+This is a debug message from the resolver listing the contents of a
+received DNS message.
+</p></dd><dt><a name="RESOLVER_DNS_MESSAGE_SENT"></a><span class="term">RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</span></dt><dd><p>
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
 </p></dd><dt><a name="RESOLVER_FAILED"></a><span class="term">RESOLVER_FAILED resolver failed, reason: %1</span></dt><dd><p>
-This is an error message output when an unhandled exception is caught by the
-resolver.  All it can do is to shut down.
-</p></dd><dt><a name="RESOLVER_FWDADDR"></a><span class="term">RESOLVER_FWDADDR setting forward address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
-</p></dd><dt><a name="RESOLVER_FWDQUERY"></a><span class="term">RESOLVER_FWDQUERY processing forward query</span></dt><dd><p>
-The received query has passed all checks and is being forwarded to upstream
+This is an error message output when an unhandled exception is caught
+by the resolver.  After this, the resolver will shut itself down.
+Please submit a bug report.
+</p></dd><dt><a name="RESOLVER_FORWARD_ADDRESS"></a><span class="term">RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</span></dt><dd><p>
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address.  If multiple addresses are
+specified, it will appear once for each address.
+</p></dd><dt><a name="RESOLVER_FORWARD_QUERY"></a><span class="term">RESOLVER_FORWARD_QUERY processing forward query</span></dt><dd><p>
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
 servers.
-</p></dd><dt><a name="RESOLVER_HDRERR"></a><span class="term">RESOLVER_HDRERR message received, exception when processing header: %1</span></dt><dd><p>
-A debug message noting that an exception occurred during the processing of
-a received packet.  The packet has been dropped.
+</p></dd><dt><a name="RESOLVER_HEADER_ERROR"></a><span class="term">RESOLVER_HEADER_ERROR message received, exception when processing header: %1</span></dt><dd><p>
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet.  The packet has
+been dropped.
 </p></dd><dt><a name="RESOLVER_IXFR"></a><span class="term">RESOLVER_IXFR IXFR request received</span></dt><dd><p>
-The resolver received a NOTIFY message over TCP.  The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_LKTMOSMALL"></a><span class="term">RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
-</p></dd><dt><a name="RESOLVER_NFYNOTAUTH"></a><span class="term">RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</span></dt><dd><p>
-The resolver received a NOTIFY message.  As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
-</p></dd><dt><a name="RESOLVER_NORMQUERY"></a><span class="term">RESOLVER_NORMQUERY processing normal query</span></dt><dd><p>
-The received query has passed all checks and is being processed by the resolver.
-</p></dd><dt><a name="RESOLVER_NOROOTADDR"></a><span class="term">RESOLVER_NOROOTADDR no root addresses available</span></dt><dd><p>
-A warning message during startup, indicates that no root addresses have been
-set.  This may be because the resolver will get them from a priming query.
-</p></dd><dt><a name="RESOLVER_NOTIN"></a><span class="term">RESOLVER_NOTIN non-IN class request received, returning REFUSED message</span></dt><dd><p>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
-</p></dd><dt><a name="RESOLVER_NOTONEQUES"></a><span class="term">RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</span></dt><dd><p>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message.  This is a malformed
-message, as a DNS query must contain only one question.  The resolver will
-return a message to the sender with the RCODE set to FORMERR.
-</p></dd><dt><a name="RESOLVER_OPCODEUNS"></a><span class="term">RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</span></dt><dd><p>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes).  It will return a message to the sender
-with the RCODE set to NOTIMP.
-</p></dd><dt><a name="RESOLVER_PARSEERR"></a><span class="term">RESOLVER_PARSEERR error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded).  The message parameters give
-a textual description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_PRINTMSG"></a><span class="term">RESOLVER_PRINTMSG print message command, aeguments are: %1</span></dt><dd><p>
-This message is logged when a "print_message" command is received over the
-command channel.
-</p></dd><dt><a name="RESOLVER_PROTERR"></a><span class="term">RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded).  The message parameters give a textual
-description of the problem and the RCODE returned.
-</p></dd><dt><a name="RESOLVER_QUSETUP"></a><span class="term">RESOLVER_QUSETUP query setup</span></dt><dd><p>
-A debug message noting that the resolver is creating a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUSHUT"></a><span class="term">RESOLVER_QUSHUT query shutdown</span></dt><dd><p>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
-</p></dd><dt><a name="RESOLVER_QUTMOSMALL"></a><span class="term">RESOLVER_QUTMOSMALL query timeout of %1 is too small</span></dt><dd><p>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone).  Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="RESOLVER_LOOKUP_TIME_SMALL"></a><span class="term">RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small.  The configuration
+update will not be applied.
+</p></dd><dt><a name="RESOLVER_MESSAGE_ERROR"></a><span class="term">RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded).  The message parameters give a textual description
+of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_NEGATIVE_RETRIES"></a><span class="term">RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</span></dt><dd><p>
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid.  The
+configuration update was abandoned and the parameters were not changed.
+</p></dd><dt><a name="RESOLVER_NON_IN_PACKET"></a><span class="term">RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</span></dt><dd><p>
+This debug message is issued when resolver has received a DNS packet that
+was not IN (Internet) class.  The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
+</p></dd><dt><a name="RESOLVER_NORMAL_QUERY"></a><span class="term">RESOLVER_NORMAL_QUERY processing normal query</span></dt><dd><p>
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
+</p></dd><dt><a name="RESOLVER_NOTIFY_RECEIVED"></a><span class="term">RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</span></dt><dd><p>
+The resolver has received a NOTIFY message.  As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
+</p></dd><dt><a name="RESOLVER_NOT_ONE_QUESTION"></a><span class="term">RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</span></dt><dd><p>
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message.  This is a malformed message, as a DNS query must contain
+only one question.  The resolver will return a message to the sender
+with the RCODE set to FORMERR.
+</p></dd><dt><a name="RESOLVER_NO_ROOT_ADDRESS"></a><span class="term">RESOLVER_NO_ROOT_ADDRESS no root addresses available</span></dt><dd><p>
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set.  This may be because the resolver will
+get them from a priming query.
+</p></dd><dt><a name="RESOLVER_PARSE_ERROR"></a><span class="term">RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
+</p></dd><dt><a name="RESOLVER_PRINT_COMMAND"></a><span class="term">RESOLVER_PRINT_COMMAND print message command, arguments are: %1</span></dt><dd><p>
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
+</p></dd><dt><a name="RESOLVER_PROTOCOL_ERROR"></a><span class="term">RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</span></dt><dd><p>
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded).  The message parameters
+give a textual description of the problem and the RCODE returned.
+</p></dd><dt><a name="RESOLVER_QUERY_ACCEPTED"></a><span class="term">RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</span></dt><dd><p>
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL.  The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_DROPPED"></a><span class="term">RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL.  Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_REJECTED"></a><span class="term">RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</span></dt><dd><p>
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL.  This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
+</p></dd><dt><a name="RESOLVER_QUERY_SETUP"></a><span class="term">RESOLVER_QUERY_SETUP query setup</span></dt><dd><p>
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_SHUTDOWN"></a><span class="term">RESOLVER_QUERY_SHUTDOWN query shutdown</span></dt><dd><p>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</p></dd><dt><a name="RESOLVER_QUERY_TIME_SMALL"></a><span class="term">RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</span></dt><dd><p>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small.  The configuration
+parameters were not changed.
+</p></dd><dt><a name="RESOLVER_RECEIVED_MESSAGE"></a><span class="term">RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</span></dt><dd><p>
+This is a debug message indicating that the resolver has received a
+DNS message.  Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
 </p></dd><dt><a name="RESOLVER_RECURSIVE"></a><span class="term">RESOLVER_RECURSIVE running in recursive mode</span></dt><dd><p>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
-</p></dd><dt><a name="RESOLVER_RECVMSG"></a><span class="term">RESOLVER_RECVMSG resolver has received a DNS message</span></dt><dd><p>
-A debug message indicating that the resolver has received a message.  Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
-</p></dd><dt><a name="RESOLVER_RETRYNEG"></a><span class="term">RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</span></dt><dd><p>
-An error message indicating that the resolver configuration has specified a
-negative retry count.  Only zero or positive values are valid.
-</p></dd><dt><a name="RESOLVER_ROOTADDR"></a><span class="term">RESOLVER_ROOTADDR setting root address %1(%2)</span></dt><dd><p>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
-</p></dd><dt><a name="RESOLVER_SERVICE"></a><span class="term">RESOLVER_SERVICE service object created</span></dt><dd><p>
-A debug message, output when the main service object (which handles the
-received queries) is created.
-</p></dd><dt><a name="RESOLVER_SETPARAM"></a><span class="term">RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
-A debug message, lists the parameters associated with the message.  These are:
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
+</p></dd><dt><a name="RESOLVER_SERVICE_CREATED"></a><span class="term">RESOLVER_SERVICE_CREATED service object created</span></dt><dd><p>
+This debug message is output when resolver creates the main service object
+(which handles the received queries).
+</p></dd><dt><a name="RESOLVER_SET_PARAMS"></a><span class="term">RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</span></dt><dd><p>
+This debug message lists the parameters being set for the resolver.  These are:
 query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers.  Client timeout: the interval to resolver a query by
+to upstream servers.  Client timeout: the interval to resolve a query by
 a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
 resolver gives up trying to resolve a query.  Retry count: the number of times
 the resolver will retry a query to an upstream server if it gets a timeout.
 </p><p>
 The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
 upstream nameservers.  Even if none of these queries timeout, the total time
 taken to perform all the queries may exceed the client timeout.  When this
 happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache.  However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
 At this point it will wait for pending upstream queries to complete or
 timeout and drop the query.
+</p></dd><dt><a name="RESOLVER_SET_QUERY_ACL"></a><span class="term">RESOLVER_SET_QUERY_ACL query ACL is configured</span></dt><dd><p>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</p></dd><dt><a name="RESOLVER_SET_ROOT_ADDRESS"></a><span class="term">RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</span></dt><dd><p>
+This message gives the address of one of the root servers used by the
+resolver.  It is output during startup and may appear multiple times,
+once for each root server address.
 </p></dd><dt><a name="RESOLVER_SHUTDOWN"></a><span class="term">RESOLVER_SHUTDOWN resolver shutdown complete</span></dt><dd><p>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
 </p></dd><dt><a name="RESOLVER_STARTED"></a><span class="term">RESOLVER_STARTED resolver started</span></dt><dd><p>
 This informational message is output by the resolver when all initialization
 has been completed and it is entering its main loop.
 </p></dd><dt><a name="RESOLVER_STARTING"></a><span class="term">RESOLVER_STARTING starting resolver with command line '%1'</span></dt><dd><p>
 An informational message, this is output when the resolver starts up.
-</p></dd><dt><a name="RESOLVER_UNEXRESP"></a><span class="term">RESOLVER_UNEXRESP received unexpected response, ignoring</span></dt><dd><p>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+</p></dd><dt><a name="RESOLVER_UNEXPECTED_RESPONSE"></a><span class="term">RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</span></dt><dd><p>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which it is listening for queries.  The packet
+has been ignored.
+</p></dd><dt><a name="RESOLVER_UNSUPPORTED_OPCODE"></a><span class="term">RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</span></dt><dd><p>
+This is a debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes).  It will return
+a message to the sender with the RCODE set to NOTIMP.
+</p></dd><dt><a name="SRVCOMM_ADDRESSES_NOT_LIST"></a><span class="term">SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</span></dt><dd><p>
+This points to an error in configuration. What was supposed to be a list of
+IP address - port pairs isn't a list at all but something else.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_FAIL"></a><span class="term">SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</span></dt><dd><p>
+The server failed to bind to one of the address/port pairs it should
+listen on according to the configuration, for the reason listed in the
+message (usually because that pair is already used by another service or
+because of missing privileges). The server will try to recover and bind
+the address/port pairs it was listening to before (if any).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_MISSING"></a><span class="term">SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_TYPE"></a><span class="term">SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</span></dt><dd><p>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in
+the message. A valid specification contains an address part (which must be a
+string and must represent a valid IPv4 or IPv6 address) and port (which must
+be an integer in the range valid for TCP/UDP ports on your system).
+</p></dd><dt><a name="SRVCOMM_ADDRESS_UNRECOVERABLE"></a><span class="term">SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</span></dt><dd><p>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</p><p>
+The condition indicates problems with the server and/or the system on
+which it is running.  The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator reconfigures it.
+</p></dd><dt><a name="SRVCOMM_ADDRESS_VALUE"></a><span class="term">SRVCOMM_ADDRESS_VALUE address to set: %1#%2</span></dt><dd><p>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (eg. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has higher debug level.
+</p></dd><dt><a name="SRVCOMM_KEYS_DEINIT"></a><span class="term">SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</p></dd><dt><a name="SRVCOMM_KEYS_INIT"></a><span class="term">SRVCOMM_KEYS_INIT initializing TSIG keyring</span></dt><dd><p>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</p></dd><dt><a name="SRVCOMM_KEYS_UPDATE"></a><span class="term">SRVCOMM_KEYS_UPDATE updating TSIG keyring</span></dt><dd><p>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</p></dd><dt><a name="SRVCOMM_PORT_RANGE"></a><span class="term">SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</span></dt><dd><p>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</p></dd><dt><a name="SRVCOMM_SET_LISTEN"></a><span class="term">SRVCOMM_SET_LISTEN setting addresses to listen to</span></dt><dd><p>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</p></dd><dt><a name="STATHTTPD_BAD_OPTION_VALUE"></a><span class="term">STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</p></dd><dt><a name="STATHTTPD_CC_SESSION_ERROR"></a><span class="term">STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</p></dd><dt><a name="STATHTTPD_CLOSING"></a><span class="term">STATHTTPD_CLOSING closing %1#%2</span></dt><dd><p>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</p></dd><dt><a name="STATHTTPD_CLOSING_CC_SESSION"></a><span class="term">STATHTTPD_CLOSING_CC_SESSION stopping cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</p></dd><dt><a name="STATHTTPD_HANDLE_CONFIG"></a><span class="term">STATHTTPD_HANDLE_CONFIG reading configuration: %1</span></dt><dd><p>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_STATUS_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</p></dd><dt><a name="STATHTTPD_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_DATAERROR"></a><span class="term">STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
+</p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</p></dd><dt><a name="STATHTTPD_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</p></dd><dt><a name="STATHTTPD_SHUTDOWN"></a><span class="term">STATHTTPD_SHUTDOWN shutting down</span></dt><dd><p>
+The stats-httpd daemon is shutting down.
+</p></dd><dt><a name="STATHTTPD_STARTED"></a><span class="term">STATHTTPD_STARTED listening on %1#%2</span></dt><dd><p>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</p></dd><dt><a name="STATHTTPD_STARTING_CC_SESSION"></a><span class="term">STATHTTPD_STARTING_CC_SESSION starting cc session</span></dt><dd><p>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</p></dd><dt><a name="STATHTTPD_START_SERVER_INIT_ERROR"></a><span class="term">STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</span></dt><dd><p>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</p></dd><dt><a name="STATHTTPD_STOPPED_BY_KEYBOARD"></a><span class="term">STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</p></dd><dt><a name="STATHTTPD_UNKNOWN_CONFIG_ITEM"></a><span class="term">STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</span></dt><dd><p>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</p></dd><dt><a name="STATS_BAD_OPTION_VALUE"></a><span class="term">STATS_BAD_OPTION_VALUE bad command line argument: %1</span></dt><dd><p>
+The stats module was called with a bad command-line argument and will
+not start.
+</p></dd><dt><a name="STATS_CC_SESSION_ERROR"></a><span class="term">STATS_CC_SESSION_ERROR error connecting to message bus: %1</span></dt><dd><p>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</span></dt><dd><p>
+The stats module received a command to show all statistics schemas of all modules.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</span></dt><dd><p>
+The stats module received a command to show the specified statistics schema of the specified module.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
+The stats module received a command to show all statistics that it has
+collected.
+</p></dd><dt><a name="STATS_RECEIVED_SHOW_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</span></dt><dd><p>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</p></dd><dt><a name="STATS_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+A shutdown command was sent to the stats module and it will now shut down.
+</p></dd><dt><a name="STATS_RECEIVED_STATUS_COMMAND"></a><span class="term">STATS_RECEIVED_STATUS_COMMAND received command to return status</span></dt><dd><p>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</p></dd><dt><a name="STATS_RECEIVED_UNKNOWN_COMMAND"></a><span class="term">STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</span></dt><dd><p>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</p></dd><dt><a name="STATS_STARTING"></a><span class="term">STATS_STARTING starting</span></dt><dd><p>
+The stats module is now starting.
+</p></dd><dt><a name="STATS_START_ERROR"></a><span class="term">STATS_START_ERROR stats module error: %1</span></dt><dd><p>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
+</p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</p></dd><dt><a name="STATS_UNKNOWN_COMMAND_IN_SPEC"></a><span class="term">STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</span></dt><dd><p>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.  Note: due to the code structure
+this can only happen for AXFR.
+</p></dd><dt><a name="XFRIN_AXFR_INCONSISTENT_SOA"></a><span class="term">XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</span></dt><dd><p>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same.  According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold.  There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems that it would be better to reject the
+transfer if a mismatch is detected.  On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs.  For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found.  If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
+The given master address is not a valid IP address.
+</p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
+The master port as read from the configuration is not a valid port number.
+</p></dd><dt><a name="XFRIN_BAD_TSIG_KEY_STRING"></a><span class="term">XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFRIN_BAD_ZONE_CLASS"></a><span class="term">XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</span></dt><dd><p>
+The zone class as read from the configuration is not a valid DNS class.
+</p></dd><dt><a name="XFRIN_CC_SESSION_ERROR"></a><span class="term">XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFRIN_COMMAND_ERROR"></a><span class="term">XFRIN_COMMAND_ERROR error while executing command '%1': %2</span></dt><dd><p>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</p></dd><dt><a name="XFRIN_GOT_INCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</span></dt><dd><p>
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found.  This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+</p></dd><dt><a name="XFRIN_GOT_NONINCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</span></dt><dd><p>
+Non incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA.  Non incremental transfer is
+either AXFR or AXFR-style IXFR.  In the latter case, it means that
+in a response to IXFR query the first data is not SOA or its SOA serial
+is not equal to the requested SOA serial.
+</p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR"></a><span class="term">XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</span></dt><dd><p>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_NOTIFY_UNKNOWN_MASTER"></a><span class="term">XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</span></dt><dd><p>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+</p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</p></dd><dt><a name="XFRIN_STARTING"></a><span class="term">XFRIN_STARTING starting xfrin with command line '%1'</span></dt><dd><p>
+An informational message, this is output when the xfrin daemon starts up.
+</p></dd><dt><a name="XFRIN_STOPPED_BY_KEYBOARD"></a><span class="term">XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</p></dd><dt><a name="XFRIN_XFR_OTHER_FAILURE"></a><span class="term">XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module.  Possible reasons are a broken DNS message or failure
+in database connection.  The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_PROCESS_FAILURE"></a><span class="term">XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</span></dt><dd><p>
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general, these errors are not really expected
+ones, and indicate an installation error or a program bug.  The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete.  So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FALLBACK"></a><span class="term">XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</span></dt><dd><p>
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such as when the remote server doesn't support IXFR, we don't have the SOA
+record (or the zone at all), we are out of sync, etc. In many of these
+situations, AXFR could still work. Therefore we try that one in case it helps.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_STARTED"></a><span class="term">XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</span></dt><dd><p>
+The XFR transfer of the given zone was successfully completed.
+</p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</p></dd><dt><a name="XFROUT_CC_SESSION_ERROR"></a><span class="term">XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</span></dt><dd><p>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</p></dd><dt><a name="XFROUT_CC_SESSION_TIMEOUT_ERROR"></a><span class="term">XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</span></dt><dd><p>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_CONFIG_ERROR"></a><span class="term">XFROUT_CONFIG_ERROR error found in configuration data: %1</span></dt><dd><p>
+The xfrout process encountered an error when installing the configuration at
+startup time.  Details of the error are included in the log message.
+</p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shutdown.
+</p></dd><dt><a name="XFROUT_HANDLE_QUERY_ERROR"></a><span class="term">XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</span></dt><dd><p>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</p></dd><dt><a name="XFROUT_IMPORT"></a><span class="term">XFROUT_IMPORT error importing python module: %1</span></dt><dd><p>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</p></dd><dt><a name="XFROUT_IXFR_MULTIPLE_SOA"></a><span class="term">XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</span></dt><dd><p>
+An IXFR request was received with more than one SOA RRs in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_JOURNAL_SUPPORT"></a><span class="term">XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_SOA"></a><span class="term">XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</span></dt><dd><p>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_VERSION"></a><span class="term">XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received, but the requested range of differences
+were not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_ZONE"></a><span class="term">XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</span></dt><dd><p>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</p></dd><dt><a name="XFROUT_IXFR_UPTODATE"></a><span class="term">XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</span></dt><dd><p>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic.  This will soon
+be implemented.
+</p></dd><dt><a name="XFROUT_MODULECC_SESSION_ERROR"></a><span class="term">XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</span></dt><dd><p>
+There was a problem in the lower level module handling configuration and
+control commands.  This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization.  A detailed error message from the module
+will also be displayed.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</p></dd><dt><a name="XFROUT_NEW_CONFIG_DONE"></a><span class="term">XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</span></dt><dd><p>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</p></dd><dt><a name="XFROUT_NOTIFY_COMMAND"></a><span class="term">XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</span></dt><dd><p>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</p></dd><dt><a name="XFROUT_PARSE_QUERY_ERROR"></a><span class="term">XFROUT_PARSE_QUERY_ERROR error parsing query: %1</span></dt><dd><p>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</p></dd><dt><a name="XFROUT_PROCESS_REQUEST_ERROR"></a><span class="term">XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</span></dt><dd><p>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer zone to
+given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</p></dd><dt><a name="XFROUT_QUERY_QUOTA_EXCCEEDED"></a><span class="term">XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</span></dt><dd><p>
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</span></dt><dd><p>
+The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
+given host. This is because of ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</p></dd><dt><a name="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR"></a><span class="term">XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</span></dt><dd><p>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</p></dd><dt><a name="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</span></dt><dd><p>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is show in the log message. The xfrout
+daemon will shut down.
+</p></dd><dt><a name="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR"></a><span class="term">XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</span></dt><dd><p>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</p></dd><dt><a name="XFROUT_SOCKET_SELECT_ERROR"></a><span class="term">XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</span></dt><dd><p>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</p></dd><dt><a name="XFROUT_STOPPED_BY_KEYBOARD"></a><span class="term">XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</p></dd><dt><a name="XFROUT_STOPPING"></a><span class="term">XFROUT_STOPPING the xfrout daemon is shutting down</span></dt><dd><p>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</p></dd><dt><a name="XFROUT_UNIX_SOCKET_FILE_IN_USE"></a><span class="term">XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</span></dt><dd><p>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_CHECK_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</span></dt><dd><p>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is that some low level error in the data
+source, but it may also be other general (more unlikely) errors such
+as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_DONE"></a><span class="term">XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_FAILED"></a><span class="term">XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_STARTED"></a><span class="term">XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
+</p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
+An error was encountered on the command channel.  The message indicates
+the nature of the error.
+</p></dd><dt><a name="ZONEMGR_JITTER_TOO_BIG"></a><span class="term">ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</span></dt><dd><p>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</p></dd><dt><a name="ZONEMGR_KEYBOARD_INTERRUPT"></a><span class="term">ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</span></dt><dd><p>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</p></dd><dt><a name="ZONEMGR_LOAD_ZONE"></a><span class="term">ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</p></dd><dt><a name="ZONEMGR_NO_MASTER_ADDRESS"></a><span class="term">ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</span></dt><dd><p>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received.  This may be due to an internal programming error; please
+submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_SOA"></a><span class="term">ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</span></dt><dd><p>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record.  The load has
+been abandoned.
+</p></dd><dt><a name="ZONEMGR_NO_TIMER_THREAD"></a><span class="term">ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</span></dt><dd><p>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running.  This may indicate an
+internal program error.  Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_CLASS"></a><span class="term">ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_NO_ZONE_NAME"></a><span class="term">ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</span></dt><dd><p>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_NOTIFY"></a><span class="term">ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel.  The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_SHUTDOWN"></a><span class="term">ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_UNKNOWN"></a><span class="term">ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</span></dt><dd><p>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel.  The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error.  Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_FAILED"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel.  The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</p></dd><dt><a name="ZONEMGR_RECEIVE_XFRIN_SUCCESS"></a><span class="term">ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</span></dt><dd><p>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel.  The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</p></dd><dt><a name="ZONEMGR_REFRESH_ZONE"></a><span class="term">ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</span></dt><dd><p>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</p></dd><dt><a name="ZONEMGR_SELECT_ERROR"></a><span class="term">ZONEMGR_SELECT_ERROR error with select(): %1</span></dt><dd><p>
+An attempt to wait for input from a socket failed.  The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</p></dd><dt><a name="ZONEMGR_SEND_FAIL"></a><span class="term">ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</span></dt><dd><p>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed.  The session between the modules has been closed.
+</p></dd><dt><a name="ZONEMGR_SESSION_ERROR"></a><span class="term">ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SESSION_TIMEOUT"></a><span class="term">ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</span></dt><dd><p>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+</p></dd><dt><a name="ZONEMGR_SHUTDOWN"></a><span class="term">ZONEMGR_SHUTDOWN zone manager has shut down</span></dt><dd><p>
+A debug message, output when the zone manager has shut down completely.
+</p></dd><dt><a name="ZONEMGR_STARTING"></a><span class="term">ZONEMGR_STARTING zone manager starting</span></dt><dd><p>
+A debug message output when the zone manager starts up.
+</p></dd><dt><a name="ZONEMGR_TIMER_THREAD_RUNNING"></a><span class="term">ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</span></dt><dd><p>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running.  It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer.  Please submit
+a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_FAIL"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager.  This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case).  Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_NOTIFIED"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager.  This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case).  Please submit a bug report.
+</p></dd><dt><a name="ZONEMGR_UNKNOWN_ZONE_SUCCESS"></a><span class="term">ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</span></dt><dd><p>
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager.  This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
 </p></dd></dl></div><p>
     </p></div></div></body></html>
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index eaa8bb9..4dc02d4 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -5,6 +5,12 @@
 <!ENTITY % version SYSTEM "version.ent">
 %version;
 ]>
+<!--
+     This XML document is generated using the system_messages.py tool
+     based on the .mes message files.
+
+     Do not edit this file.
+-->
 <book>
   <?xml-stylesheet href="bind10-guide.css" type="text/css"?>
 
@@ -62,16 +68,16 @@
     <para>
       <variablelist>
 
-<varlistentry id="ASIODNS_FETCHCOMP">
-<term>ASIODNS_FETCHCOMP upstream fetch to %1(%2) has now completed</term>
+<varlistentry id="ASIODNS_FETCH_COMPLETED">
+<term>ASIODNS_FETCH_COMPLETED upstream fetch to %1(%2) has now completed</term>
 <listitem><para>
-A debug message, this records the the upstream fetch (a query made by the
+A debug message, this records that the upstream fetch (a query made by the
 resolver on behalf of its client) to the specified address has completed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_FETCHSTOP">
-<term>ASIODNS_FETCHSTOP upstream fetch to %1(%2) has been stopped</term>
+<varlistentry id="ASIODNS_FETCH_STOPPED">
+<term>ASIODNS_FETCH_STOPPED upstream fetch to %1(%2) has been stopped</term>
 <listitem><para>
 An external component has requested the halting of an upstream fetch.  This
 is an allowed operation, and the message should only appear if debug is
@@ -79,27 +85,27 @@ enabled.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_OPENSOCK">
-<term>ASIODNS_OPENSOCK error %1 opening %2 socket to %3(%4)</term>
+<varlistentry id="ASIODNS_OPEN_SOCKET">
+<term>ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)</term>
 <listitem><para>
 The asynchronous I/O code encountered an error when trying to open a socket
 of the specified protocol in order to send a message to the target address.
-The the number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
 message.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_RECVSOCK">
-<term>ASIODNS_RECVSOCK error %1 reading %2 data from %3(%4)</term>
+<varlistentry id="ASIODNS_READ_DATA">
+<term>ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)</term>
 <listitem><para>
-The asynchronous I/O code encountered an error when trying read data from
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to read data from
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_RECVTMO">
-<term>ASIODNS_RECVTMO receive timeout while waiting for data from %1(%2)</term>
+<varlistentry id="ASIODNS_READ_TIMEOUT">
+<term>ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)</term>
 <listitem><para>
 An upstream fetch from the specified address timed out.  This may happen for
 any number of reasons and is most probably a problem at the remote server
@@ -108,29 +114,1596 @@ enabled.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_SENDSOCK">
-<term>ASIODNS_SENDSOCK error %1 sending data using %2 to %3(%4)</term>
+<varlistentry id="ASIODNS_SEND_DATA">
+<term>ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)</term>
+<listitem><para>
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_ORIGIN">
+<term>ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<listitem><para>
+An internal consistency check on the origin of a message from the
+asynchronous I/O module failed. This may indicate an internal error;
+please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ASIODNS_UNKNOWN_RESULT">
+<term>ASIODNS_UNKNOWN_RESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<listitem><para>
+An internal error indicating that the termination method of the resolver's
+upstream fetch class was called with an unknown result code (which is
+given in the message).  Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_ERROR">
+<term>AUTH_AXFR_ERROR error handling AXFR request: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it
+has encountered an error processing an AXFR request. The message gives
+the reason for the error, and the server will return a SERVFAIL code to
+the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_AXFR_UDP">
+<term>AUTH_AXFR_UDP AXFR query received over UDP</term>
+<listitem><para>
+This is a debug message output when the authoritative server has received
+an AXFR query over UDP. Use of UDP for AXFRs is not permitted by the
+protocol, so the server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_COMMAND_FAILED">
+<term>AUTH_COMMAND_FAILED execution of command channel instruction '%1' failed: %2</term>
+<listitem><para>
+Execution of the specified command by the authoritative server failed. The
+message contains the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_CREATED">
+<term>AUTH_CONFIG_CHANNEL_CREATED configuration session channel created</term>
+<listitem><para>
+This is a debug message indicating that authoritative server has created
+the channel to the configuration manager.  It is issued during server
+startup as an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_ESTABLISHED">
+<term>AUTH_CONFIG_CHANNEL_ESTABLISHED configuration session channel established</term>
+<listitem><para>
+This is a debug message indicating that authoritative server
+has established communication with the configuration manager over the
+previously-created channel. It is issued during server startup as an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_CHANNEL_STARTED">
+<term>AUTH_CONFIG_CHANNEL_STARTED configuration session channel started</term>
+<listitem><para>
+This is a debug message, issued when the authoritative server has
+posted a request to be notified when new configuration information is
+available. It is issued during server startup as an indication that
+the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_LOAD_FAIL">
+<term>AUTH_CONFIG_LOAD_FAIL load of configuration failed: %1</term>
+<listitem><para>
+An attempt to configure the server with information from the configuration
+database during the startup sequence has failed. (The reason for
+the failure is given in the message.) The server will continue its
+initialization although it may not be configured in the desired way.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_CONFIG_UPDATE_FAIL">
+<term>AUTH_CONFIG_UPDATE_FAIL update of configuration failed: %1</term>
+<listitem><para>
+An attempt to update the configuration of the server with information
+from the configuration database has failed, the reason being given in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DATA_SOURCE">
+<term>AUTH_DATA_SOURCE data source database file: %1</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it accesses a
+database data source, listing the file that is being accessed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_DNS_SERVICES_CREATED">
+<term>AUTH_DNS_SERVICES_CREATED DNS services created</term>
+<listitem><para>
+This is a debug message indicating that the component that will be handling
+incoming queries for the authoritative server (DNSServices) has been
+successfully created. It is issued during server startup as an indication
+that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_HEADER_PARSE_FAIL">
+<term>AUTH_HEADER_PARSE_FAIL unable to parse header in received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse the header of a received DNS packet has failed. (The
+reason for the failure is given in the message.) The server will drop the
+packet.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_INVALID_STATISTICS_DATA">
+<term>AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_TSIG">
+<term>AUTH_LOAD_TSIG loading TSIG keys</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has requested the keyring holding TSIG keys from the configuration
+database. It is issued during server startup as an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_LOAD_ZONE">
+<term>AUTH_LOAD_ZONE loaded zone %1/%2</term>
+<listitem><para>
+This debug message is issued during the processing of the 'loadzone' command
+when the authoritative server has successfully loaded the named zone of the
+named class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_DISABLED">
+<term>AUTH_MEM_DATASRC_DISABLED memory data source is disabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is disabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_MEM_DATASRC_ENABLED">
+<term>AUTH_MEM_DATASRC_ENABLED memory data source is enabled for class %1</term>
+<listitem><para>
+This is a debug message reporting that the authoritative server has
+discovered that the memory data source is enabled for the given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_QUESTIONS">
+<term>AUTH_NOTIFY_QUESTIONS invalid number of questions (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that contains zero or more than one question. (A valid
+NOTIFY packet contains one question.) The server will return a FORMERR
+error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NOTIFY_RRTYPE">
+<term>AUTH_NOTIFY_RRTYPE invalid question RR type (%1) in incoming NOTIFY</term>
+<listitem><para>
+This debug message is logged by the authoritative server when it receives
+a NOTIFY packet that has an RR type of something other than SOA in the
+question section. (The RR type received is included in the message.) The
+server will return a FORMERR error to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_STATS_SESSION">
+<term>AUTH_NO_STATS_SESSION session interface for statistics is not available</term>
+<listitem><para>
+The authoritative server had no session with the statistics module at the
+time it attempted to send it data: the attempt has been abandoned. This
+could be an error in configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_NO_XFRIN">
+<term>AUTH_NO_XFRIN received NOTIFY but XFRIN session is not running</term>
+<listitem><para>
+This is a debug message produced by the authoritative server when it receives
+a NOTIFY packet but the XFRIN process is not running. The packet will be
+dropped and nothing returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PARSE_ERROR">
+<term>AUTH_PACKET_PARSE_ERROR unable to parse received DNS packet: %1</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to something other
+than a protocol error. The reason for the failure is given in the message;
+the server will return a SERVFAIL error code to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_PROTOCOL_ERROR">
+<term>AUTH_PACKET_PROTOCOL_ERROR DNS packet protocol error: %1. Returning %2</term>
+<listitem><para>
+This is a debug message, generated by the authoritative server when an
+attempt to parse a received DNS packet has failed due to a protocol error.
+The reason for the failure is given in the message, as is the error code
+that will be returned to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PACKET_RECEIVED">
+<term>AUTH_PACKET_RECEIVED message received:\n%1</term>
+<listitem><para>
+This is a debug message output by the authoritative server when it
+receives a valid DNS packet.
+</para><para>
+Note: This message includes the packet received, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_PROCESS_FAIL">
+<term>AUTH_PROCESS_FAIL message processing failure: %1</term>
+<listitem><para>
+This message is generated by the authoritative server when it has
+encountered an internal error whilst processing a received packet:
+the cause of the error is included in the message.
+</para><para>
+The server will return a SERVFAIL error code to the sender of the packet.
+This message indicates a potential error in the server.  Please open a
+bug ticket for this issue.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_COMMAND">
+<term>AUTH_RECEIVED_COMMAND command '%1' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command on the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RECEIVED_SENDSTATS">
+<term>AUTH_RECEIVED_SENDSTATS command 'sendstats' received</term>
+<listitem><para>
+This is a debug message issued when the authoritative server has received
+a command from the statistics module to send it data. The 'sendstats'
+command is handled differently to other commands, which is why the debug
+message associated with it has its own code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_RESPONSE_RECEIVED">
+<term>AUTH_RESPONSE_RECEIVED received response message, ignoring</term>
+<listitem><para>
+This is a debug message; it is output if the authoritative server
+receives a DNS packet with the QR bit set, i.e. a DNS response. The
+server ignores the packet as it only responds to question packets.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_ERROR_RESPONSE">
+<term>AUTH_SEND_ERROR_RESPONSE sending an error response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+an error response to the originator of the query. A previous message will
+have recorded details of the failure.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SEND_NORMAL_RESPONSE">
+<term>AUTH_SEND_NORMAL_RESPONSE sending a normal response (%1 bytes):\n%2</term>
+<listitem><para>
+This is a debug message recording that the authoritative server is sending
+a response to the originator of a query.
+</para><para>
+Note: This message includes the packet sent, rendered in the form of
+multiple lines of text. For this reason, it is suggested that this log message
+not be routed to the syslog file, where the multiple lines could confuse
+programs that expect a format of one message per line.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_CREATED">
+<term>AUTH_SERVER_CREATED server created</term>
+<listitem><para>
+An informational message indicating that the authoritative server process has
+been created and is initializing. The AUTH_SERVER_STARTED message will be
+output when initialization has successfully completed and the server starts
+accepting queries.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_FAILED">
+<term>AUTH_SERVER_FAILED server failed: %1</term>
+<listitem><para>
+The authoritative server has encountered a fatal error and is terminating. The
+reason for the failure is included in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SERVER_STARTED">
+<term>AUTH_SERVER_STARTED server started</term>
+<listitem><para>
+Initialization of the authoritative server has completed successfully
+and it is entering the main loop, waiting for queries to arrive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_SQLITE3">
+<term>AUTH_SQLITE3 nothing to do for loading sqlite3</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+found that the data source it is loading is an SQLite3 data source,
+so no further validation is needed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_CREATED">
+<term>AUTH_STATS_CHANNEL_CREATED STATS session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the statistics process.  It is issued during server
+startup and is an indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_CHANNEL_ESTABLISHED">
+<term>AUTH_STATS_CHANNEL_ESTABLISHED STATS session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server
+has established communication over the previously created statistics
+channel.  It is issued during server startup and is an indication that the
+initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_COMMS">
+<term>AUTH_STATS_COMMS communication error in sending statistics data: %1</term>
+<listitem><para>
+An error was encountered when the authoritative server tried to send data
+to the statistics daemon. The message includes additional information
+describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMEOUT">
+<term>AUTH_STATS_TIMEOUT timeout while sending statistics data: %1</term>
+<listitem><para>
+The authoritative server sent data to the statistics daemon but received
+no acknowledgement within the specified time. The message includes
+additional information describing the reason for the failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_DISABLED">
+<term>AUTH_STATS_TIMER_DISABLED statistics timer has been disabled</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+disabled in the authoritative server and no statistics information is
+being produced.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_STATS_TIMER_SET">
+<term>AUTH_STATS_TIMER_SET statistics timer set to %1 second(s)</term>
+<listitem><para>
+This is a debug message indicating that the statistics timer has been
+enabled and that the authoritative server will produce statistics data
+at the specified interval.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_UNSUPPORTED_OPCODE">
+<term>AUTH_UNSUPPORTED_OPCODE unsupported opcode: %1</term>
+<listitem><para>
+This is a debug message, produced when a received DNS packet being
+processed by the authoritative server has been found to contain an
+unsupported opcode. (The opcode is included in the message.) The server
+will return an error code of NOTIMPL to the sender.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_CREATED">
+<term>AUTH_XFRIN_CHANNEL_CREATED XFRIN session channel created</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+created a channel to the XFRIN (Transfer-in) process.  It is issued
+during server startup and is an indication that the initialization is
+proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_XFRIN_CHANNEL_ESTABLISHED">
+<term>AUTH_XFRIN_CHANNEL_ESTABLISHED XFRIN session channel established</term>
+<listitem><para>
+This is a debug message indicating that the authoritative server has
+established communication over the previously-created channel to the
+XFRIN (Transfer-in) process.  It is issued during server startup and is an
+indication that the initialization is proceeding normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_COMMS">
+<term>AUTH_ZONEMGR_COMMS error communicating with zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY request.
+An error (listed in the message) has been encountered whilst communicating
+with the zone manager. The NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="AUTH_ZONEMGR_ERROR">
+<term>AUTH_ZONEMGR_ERROR received error response from zone manager: %1</term>
+<listitem><para>
+This is a debug message output during the processing of a NOTIFY
+request. The zone manager component has been informed of the request,
+but has returned an error response (which is included in the message). The
+NOTIFY request will not be honored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CHECK_MSGQ_ALREADY_RUNNING">
+<term>BIND10_CHECK_MSGQ_ALREADY_RUNNING checking if msgq is already running</term>
+<listitem><para>
+The boss process is starting up and will now check if the message bus
+daemon is already running. If so, it will not be able to start, as it
+needs a dedicated message bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_FAILED">
+<term>BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</term>
+<listitem><para>
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_RESTART">
+<term>BIND10_COMPONENT_RESTART component %1 is about to restart</term>
+<listitem><para>
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START">
+<term>BIND10_COMPONENT_START component %1 is starting</term>
+<listitem><para>
+The named component is about to be started by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START_EXCEPTION">
+<term>BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</term>
+<listitem><para>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_STOP">
+<term>BIND10_COMPONENT_STOP component %1 is being stopped</term>
+<listitem><para>
+A component is about to be asked to stop willingly by the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_UNSATISFIED">
+<term>BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</term>
+<listitem><para>
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_BUILD">
+<term>BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</term>
+<listitem><para>
+A debug message. This indicates that the configurator is building a plan
+how to change configuration from the older one to newer one. This does no
+real work yet, it just does the planning what needs to be done.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_PLAN_INTERRUPTED">
+<term>BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</term>
+<listitem><para>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RECONFIGURE">
+<term>BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</term>
+<listitem><para>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RUN">
+<term>BIND10_CONFIGURATOR_RUN running plan of %1 tasks</term>
+<listitem><para>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_START">
+<term>BIND10_CONFIGURATOR_START bind10 component configurator is starting up</term>
+<listitem><para>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_STOP">
+<term>BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</term>
+<listitem><para>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_TASK">
+<term>BIND10_CONFIGURATOR_TASK performing task %1 on %2</term>
+<listitem><para>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_STATISTICS_DATA">
+<term>BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</term>
+<listitem><para>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_INVALID_USER">
+<term>BIND10_INVALID_USER invalid user: %1</term>
+<listitem><para>
+The boss process was started with the -u option, to drop root privileges
+and continue running as the specified user, but the user is unknown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILLING_ALL_PROCESSES">
+<term>BIND10_KILLING_ALL_PROCESSES killing all started processes</term>
+<listitem><para>
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_KILL_PROCESS">
+<term>BIND10_KILL_PROCESS killing process %1</term>
+<listitem><para>
+The boss module is sending a kill signal to process with the given name,
+as part of the process of killing all started processes during a failed
+startup, as described for BIND10_KILLING_ALL_PROCESSES.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_ALREADY_RUNNING">
+<term>BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start</term>
+<listitem><para>
+There already appears to be a message bus daemon running. Either an
+old process was not shut down correctly, and needs to be killed, or
+another instance of BIND10, with the same msgq domain socket, is
+running, which needs to be stopped.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_MSGQ_DISAPPEARED">
+<term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
+<listitem><para>
+While listening on the message bus channel for messages, it suddenly
+disappeared. The msgq daemon may have died. This might lead to an
+inconsistent state of the system, and BIND 10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_PROCESS_ENDED">
+<term>BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</term>
+<listitem><para>
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_READING_BOSS_CONFIGURATION">
+<term>BIND10_READING_BOSS_CONFIGURATION reading boss configuration</term>
+<listitem><para>
+The boss process is starting up, and will now process the initial
+configuration, as received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_COMMAND">
+<term>BIND10_RECEIVED_COMMAND received command: %1</term>
+<listitem><para>
+The boss module received a command and shall now process it. The command
+is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_NEW_CONFIGURATION">
+<term>BIND10_RECEIVED_NEW_CONFIGURATION received new configuration: %1</term>
+<listitem><para>
+The boss module received a configuration update and is going to apply
+it now. The new configuration is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RECEIVED_SIGNAL">
+<term>BIND10_RECEIVED_SIGNAL received signal %1</term>
+<listitem><para>
+The boss module received the given signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTED_PROCESS">
+<term>BIND10_RESURRECTED_PROCESS resurrected %1 (PID %2)</term>
+<listitem><para>
+The given process has been restarted successfully, and is now running
+with the given process id.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_RESURRECTING_PROCESS">
+<term>BIND10_RESURRECTING_PROCESS resurrecting dead %1 process...</term>
+<listitem><para>
+The given process has ended unexpectedly, and is now restarted.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SELECT_ERROR">
+<term>BIND10_SELECT_ERROR error in select() call: %1</term>
+<listitem><para>
+There was a fatal error in the call to select(), used to see if a child
+process has ended or if there is a message on the message bus. This
+should not happen under normal circumstances and is considered fatal,
+so BIND 10 will now shut down. The specific error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGKILL">
+<term>BIND10_SEND_SIGKILL sending SIGKILL to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGKILL signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SEND_SIGTERM">
+<term>BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</term>
+<listitem><para>
+The boss module is sending a SIGTERM signal to the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SETUID">
+<term>BIND10_SETUID setting UID to %1</term>
+<listitem><para>
+The boss switches the user it runs as to the given UID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN">
+<term>BIND10_SHUTDOWN stopping the server</term>
+<listitem><para>
+The boss process received a command or signal telling it to shut down.
+It will send a shutdown command to each process. The processes that do
+not shut down will then receive a SIGTERM signal. If that doesn't work,
+it shall send SIGKILL signals to the processes still alive.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SHUTDOWN_COMPLETE">
+<term>BIND10_SHUTDOWN_COMPLETE all processes ended, shutdown complete</term>
+<listitem><para>
+All child processes have been stopped, and the boss process will now
+stop itself.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_CAUSE">
+<term>BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1</term>
+<listitem><para>
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_BAD_RESPONSE">
+<term>BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</term>
+<listitem><para>
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_EOF">
+<term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
+<listitem><para>
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_INIT">
+<term>BIND10_SOCKCREATOR_INIT initializing socket creator parser</term>
+<listitem><para>
+The boss module initializes routines for parsing the socket creator
+protocol.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_KILL">
+<term>BIND10_SOCKCREATOR_KILL killing the socket creator</term>
+<listitem><para>
+The socket creator is being terminated the aggressive way, by sending it
+sigkill. This should not happen usually.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TERMINATE">
+<term>BIND10_SOCKCREATOR_TERMINATE terminating socket creator</term>
+<listitem><para>
+The boss module sends a request to terminate to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKCREATOR_TRANSPORT_ERROR">
+<term>BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1</term>
+<listitem><para>
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_CREATED">
+<term>BIND10_SOCKET_CREATED successfully created socket %1</term>
+<listitem><para>
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_ERROR">
+<term>BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3</term>
+<listitem><para>
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_SOCKET_GET">
+<term>BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</term>
+<listitem><para>
+The boss forwards a request for a socket to the socket creator.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_CC">
+<term>BIND10_STARTED_CC started configuration/command session</term>
+<listitem><para>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS">
+<term>BIND10_STARTED_PROCESS started %1</term>
+<listitem><para>
+The given process has successfully been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTED_PROCESS_PID">
+<term>BIND10_STARTED_PROCESS_PID started %1 (PID %2)</term>
+<listitem><para>
+The given process has successfully been started, and has the given PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING">
+<term>BIND10_STARTING starting BIND10: %1</term>
+<listitem><para>
+Informational message on startup that shows the full version.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_CC">
+<term>BIND10_STARTING_CC starting configuration/command session</term>
+<listitem><para>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS">
+<term>BIND10_STARTING_PROCESS starting process %1</term>
+<listitem><para>
+The boss module is starting the given process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT">
+<term>BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTING_PROCESS_PORT_ADDRESS">
+<term>BIND10_STARTING_PROCESS_PORT_ADDRESS starting process %1 (to listen on %2#%3)</term>
+<listitem><para>
+The boss module is starting the given process, which will listen on the
+given address and port number (written as &lt;address&gt;#&lt;port&gt;).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_COMPLETE">
+<term>BIND10_STARTUP_COMPLETE BIND 10 started</term>
+<listitem><para>
+All modules have been successfully started, and BIND 10 is now running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_ERROR">
+<term>BIND10_STARTUP_ERROR error during startup: %1</term>
+<listitem><para>
+There was a fatal error when BIND10 was trying to start. The error is
+shown, and BIND10 will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNEXPECTED_MESSAGE">
+<term>BIND10_STARTUP_UNEXPECTED_MESSAGE unexpected startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNRECOGNISED_MESSAGE">
+<term>BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_AUTH">
+<term>BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</term>
+<listitem><para>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_RESOLVER">
+<term>BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</term>
+<listitem><para>
+The resolver is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STOP_PROCESS">
+<term>BIND10_STOP_PROCESS asking %1 to shut down</term>
+<listitem><para>
+The boss module is sending a shutdown command to the given module over
+the message channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_UNKNOWN_CHILD_PROCESS_ENDED">
+<term>BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</term>
+<listitem><para>
+An unknown child process has exited. The PID is printed, but no further
+action will be taken by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_WAIT_CFGMGR">
+<term>BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</term>
+<listitem><para>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_ENTRY_MISSING_RRSET">
+<term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
+<listitem><para>
+The cache tried to generate the complete answer message. It knows the structure
+of the message, but some of the RRsets to be put there are not in cache (they
+probably expired already). Therefore it pretends the message was not found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_FOUND">
+<term>CACHE_LOCALZONE_FOUND found entry with key %1 in local zone data</term>
+<listitem><para>
+Debug message, noting that the requested data was successfully found in the
+local zone data of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UNKNOWN">
+<term>CACHE_LOCALZONE_UNKNOWN entry with key %1 not found in local zone data</term>
+<listitem><para>
+Debug message. The requested data was not found in the local zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_LOCALZONE_UPDATE">
+<term>CACHE_LOCALZONE_UPDATE updating local zone element at key %1</term>
+<listitem><para>
+Debug message issued when there's update to the local zone section of cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_DEINIT">
+<term>CACHE_MESSAGES_DEINIT deinitialized message cache</term>
+<listitem><para>
+Debug message. It is issued when the server deinitializes the message cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_EXPIRED">
+<term>CACHE_MESSAGES_EXPIRED found an expired message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. The requested data was found in the message cache, but it
+already expired. Therefore the cache removes the entry and pretends it found
+nothing.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_FOUND">
+<term>CACHE_MESSAGES_FOUND found a message entry for %1 in the message cache</term>
+<listitem><para>
+Debug message. We found the whole message in the cache, so it can be returned
+to user without any other lookups.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_INIT">
+<term>CACHE_MESSAGES_INIT initialized message cache for %1 messages of class %2</term>
+<listitem><para>
+Debug message issued when a new message cache is created. It lists the class
+of messages it can hold and the maximum size of the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_REMOVE">
+<term>CACHE_MESSAGES_REMOVE removing old instance of %1/%2/%3 first</term>
+<listitem><para>
+Debug message. This may follow CACHE_MESSAGES_UPDATE and indicates that, while
+updating, the old instance is being removed prior to inserting a new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNCACHEABLE">
+<term>CACHE_MESSAGES_UNCACHEABLE not inserting uncacheable message %1/%2/%3</term>
+<listitem><para>
+Debug message, noting that the given message can not be cached. This is because
+there's no SOA record in the message. See RFC 2308 section 5 for more
+information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UNKNOWN">
+<term>CACHE_MESSAGES_UNKNOWN no entry for %1 found in the message cache</term>
+<listitem><para>
+Debug message. The message cache didn't find any entry for the given key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_MESSAGES_UPDATE">
+<term>CACHE_MESSAGES_UPDATE updating message entry %1/%2/%3</term>
+<listitem><para>
+Debug message issued when the message cache is being updated with a new
+message. Either the old instance is removed or, if none is found, new one
+is created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_DEEPEST">
+<term>CACHE_RESOLVER_DEEPEST looking up deepest NS for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is looking up the deepest known nameserver,
+so the resolution doesn't have to start from the root.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT">
+<term>CACHE_RESOLVER_INIT initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message. The resolver cache is being created for this given class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_INIT_INFO">
+<term>CACHE_RESOLVER_INIT_INFO initializing resolver cache for class %1</term>
+<listitem><para>
+Debug message, the resolver cache is being created for this given class. The
+difference from CACHE_RESOLVER_INIT is only in different format of passed
+information, otherwise it does the same.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_MSG">
+<term>CACHE_RESOLVER_LOCAL_MSG message for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a complete message for the user query
+in the zone data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOCAL_RRSET">
+<term>CACHE_RESOLVER_LOCAL_RRSET RRset for %1/%2 found in local zone data</term>
+<listitem><para>
+Debug message. The resolver cache found a requested RRset in the local zone
+data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_MSG">
+<term>CACHE_RESOLVER_LOOKUP_MSG looking up message in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find a message to answer the
+user query.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_LOOKUP_RRSET">
+<term>CACHE_RESOLVER_LOOKUP_RRSET looking up RRset in resolver cache for %1/%2</term>
+<listitem><para>
+Debug message. The resolver cache is trying to find an RRset (which usually
+originates internally from the resolver).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_NO_QUESTION">
+<term>CACHE_RESOLVER_NO_QUESTION answer message for %1/%2 has empty question section</term>
+<listitem><para>
+The cache tried to fill in found data into the response message. But it
+discovered the message contains no question section, which is invalid.
+This is likely a programmer error, please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup a message in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no message is
+found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to lookup an RRset in the resolver cache, it was
+discovered there's no cache for this class at all. Therefore no data is found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_MSG">
+<term>CACHE_RESOLVER_UPDATE_MSG updating message for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating a message in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_RRSET">
+<term>CACHE_RESOLVER_UPDATE_RRSET updating RRset for %1/%2/%3</term>
+<listitem><para>
+Debug message. The resolver is updating an RRset in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_MSG no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert a message into the cache, it was
+discovered that there's no cache for the class of message. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET">
+<term>CACHE_RESOLVER_UPDATE_UNKNOWN_CLASS_RRSET no cache for class %1</term>
+<listitem><para>
+Debug message. While trying to insert an RRset into the cache, it was
+discovered that there's no cache for the class of the RRset. Therefore
+the message will not be cached.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_EXPIRED">
+<term>CACHE_RRSET_EXPIRED found expired RRset %1/%2/%3</term>
+<listitem><para>
+Debug message. The requested data was found in the RRset cache. However, it is
+expired, so the cache removed it and is going to pretend nothing was found.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_INIT">
+<term>CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2</term>
+<listitem><para>
+Debug message. The RRset cache to hold at most this many RRsets for the given
+class is being created.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_LOOKUP">
+<term>CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</term>
+<listitem><para>
+Debug message. The resolver is trying to look up data in the RRset cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_NOT_FOUND">
+<term>CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
+in the cache.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_REMOVE_OLD">
+<term>CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. During the update, the cache
+removed an old instance of the RRset to replace it with the new one.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UNTRUSTED">
+<term>CACHE_RRSET_UNTRUSTED not replacing old RRset for %1/%2/%3, it has higher trust level</term>
+<listitem><para>
+Debug message which can follow CACHE_RRSET_UPDATE. The cache already holds the
+same RRset, but from more trusted source, so the old one is kept and new one
+ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CACHE_RRSET_UPDATE">
+<term>CACHE_RRSET_UPDATE updating RRset %1/%2/%3 in the cache</term>
+<listitem><para>
+Debug message. The RRset cache is updating its data with this given RRset.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ASYNC_READ_FAILED">
+<term>CC_ASYNC_READ_FAILED asynchronous read failed</term>
+<listitem><para>
+This marks a low level error, we tried to read data from the message queue
+daemon asynchronously, but the ASIO library returned an error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_CONN_ERROR">
+<term>CC_CONN_ERROR error connecting to message queue (%1)</term>
+<listitem><para>
+It is impossible to reach the message queue daemon for the reason given. It
+is unlikely there'll be reason for whatever program this currently is to
+continue running, as the communication with the rest of BIND 10 is vital
+for the components.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_DISCONNECT">
+<term>CC_DISCONNECT disconnecting from message queue daemon</term>
+<listitem><para>
+The library is disconnecting from the message queue daemon. This debug message
+indicates that the program is trying to shut down gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISH">
+<term>CC_ESTABLISH trying to establish connection with message queue daemon at %1</term>
+<listitem><para>
+This debug message indicates that the command channel library is about to
+connect to the message queue daemon, which should be listening on the UNIX-domain
+socket listed in the output.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ESTABLISHED">
+<term>CC_ESTABLISHED successfully connected to message queue daemon</term>
+<listitem><para>
+This debug message indicates that the connection was successfully made, this
+should follow CC_ESTABLISH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVE">
+<term>CC_GROUP_RECEIVE trying to receive a message</term>
+<listitem><para>
+Debug message, noting that a message is expected to come over the command
+channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_RECEIVED">
+<term>CC_GROUP_RECEIVED message arrived ('%1', '%2')</term>
+<listitem><para>
+Debug message, noting that we successfully received a message (its envelope and
+payload listed). This follows CC_GROUP_RECEIVE, but might happen some time
+later, depending if we waited for it or just polled.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_GROUP_SEND">
+<term>CC_GROUP_SEND sending message '%1' to group '%2'</term>
+<listitem><para>
+Debug message, we're about to send a message over the command channel.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_INVALID_LENGTHS">
+<term>CC_INVALID_LENGTHS invalid length parameters (%1, %2)</term>
+<listitem><para>
+This happens when garbage comes over the command channel or some kind of
+confusion happens in the program. The data received from the socket makes no
+sense if we interpret it as message lengths. The first one is the total length
+of the message; the second is the length of the header. The header
+and its length (2 bytes) is counted in the total length.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_LENGTH_NOT_READY">
+<term>CC_LENGTH_NOT_READY length not ready</term>
+<listitem><para>
+There should be data representing the length of message on the socket, but it
+is not there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MESSAGE">
+<term>CC_NO_MESSAGE no message ready to be received yet</term>
+<listitem><para>
+The program polled for incoming messages, but there was no message waiting.
+This is a debug message which may happen only after CC_GROUP_RECEIVE.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_NO_MSGQ">
+<term>CC_NO_MSGQ unable to connect to message queue (%1)</term>
+<listitem><para>
+It isn't possible to connect to the message queue daemon, for reason listed.
+It is unlikely any program will be able to continue without the communication.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_ERROR">
+<term>CC_READ_ERROR error reading data from command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to read data from the
+command channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_READ_EXCEPTION">
+<term>CC_READ_EXCEPTION error reading data from command channel (%1)</term>
+<listitem><para>
+We received an exception while trying to read data from the command
+channel socket. The reason is listed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_REPLY">
+<term>CC_REPLY replying to message from '%1' with '%2'</term>
+<listitem><para>
+Debug message, noting we're sending a response to the original message
+with the given envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SET_TIMEOUT">
+<term>CC_SET_TIMEOUT setting timeout to %1ms</term>
+<listitem><para>
+Debug message. A timeout for which the program is willing to wait for a reply
+is being set.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_START_READ">
+<term>CC_START_READ starting asynchronous read</term>
+<listitem><para>
+Debug message. From now on, when a message (or command) comes, it'll wake the
+program and the library will automatically pass it over to the correct place.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_SUBSCRIBE">
+<term>CC_SUBSCRIBE subscribing to communication group %1</term>
+<listitem><para>
+Debug message. The program wants to receive messages addressed to this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_TIMEOUT">
+<term>CC_TIMEOUT timeout reading data from command channel</term>
+<listitem><para>
+The program waited too long for data from the command channel (usually when it
+sent a query to a different program and it didn't answer for whatever reason).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_UNSUBSCRIBE">
+<term>CC_UNSUBSCRIBE unsubscribing from communication group %1</term>
+<listitem><para>
+Debug message. The program no longer wants to receive messages addressed to
+this group.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_WRITE_ERROR">
+<term>CC_WRITE_ERROR error writing data to command channel (%1)</term>
+<listitem><para>
+A low level error happened when the library tried to write data to the command
+channel socket.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CC_ZERO_LENGTH">
+<term>CC_ZERO_LENGTH invalid message length (0)</term>
+<listitem><para>
+The library received a message length being zero, which makes no sense, since
+all messages must contain at least the envelope.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE">
+<term>CFGMGR_AUTOMATIC_CONFIG_DATABASE_UPDATE Updating configuration database from version %1 to %2</term>
+<listitem><para>
+An older version of the configuration database has been found, from which
+there was an automatic upgrade path to the current version. These changes
+are now applied, and no action from the administrator is necessary.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE">
+<term>CFGMGR_BAD_UPDATE_RESPONSE_FROM_MODULE Unable to parse response from module %1: %2</term>
+<listitem><para>
+The configuration manager sent a configuration update to a module, but
+the module responded with an answer that could not be parsed. The answer
+message appears to be invalid JSON data, or not decodable to a string.
+This is likely to be a problem in the module in question. The update is
+assumed to have failed, and will not be stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_CC_SESSION_ERROR">
+<term>CFGMGR_CC_SESSION_ERROR Error connecting to command channel: %1</term>
+<listitem><para>
+The configuration manager daemon was unable to connect to the messaging
+system. The most likely cause is that msgq is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_DATA_READ_ERROR">
+<term>CFGMGR_DATA_READ_ERROR error reading configuration database from disk: %1</term>
+<listitem><para>
+There was a problem reading the persistent configuration data as stored
+on disk. The file may be corrupted, or it is of a version from where
+there is no automatic upgrade path. The file needs to be repaired or
+removed. The configuration manager daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_IOERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an IO error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the directory where
+the file is stored does not exist, or is not writable. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION">
+<term>CFGMGR_OSERROR_WHILE_WRITING_CONFIGURATION Unable to write configuration file; configuration not stored: %1</term>
+<listitem><para>
+There was an OS error from the system while the configuration manager
+was trying to write the configuration database to disk. The specific
+error is given. The most likely cause is that the system does not have
+write access to the configuration database file. The updated
+configuration is not stored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CFGMGR_STOPPED_BY_KEYBOARD">
+<term>CFGMGR_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the cfgmgr daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_CONFIG_DATA">
+<term>CMDCTL_BAD_CONFIG_DATA error in config data: %1</term>
+<listitem><para>
+There was an error reading the updated configuration data. The specific
+error is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_BAD_PASSWORD">
+<term>CMDCTL_BAD_PASSWORD bad password for user: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the password was wrong.
+Users can be managed with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_ERROR">
+<term>CMDCTL_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the message bus daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_CC_SESSION_TIMEOUT">
+<term>CMDCTL_CC_SESSION_TIMEOUT timeout on cc channel</term>
+<listitem><para>
+A timeout occurred when waiting for essential data from the cc session.
+This usually occurs when b10-cfgmgr is not running or not responding.
+Since we are waiting for essential information, this is a fatal error,
+and the cmdctl daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_ERROR">
+<term>CMDCTL_COMMAND_ERROR error in command %1 to module %2: %3</term>
+<listitem><para>
+An error was encountered sending the given command to the given module.
+Either there was a communication problem with the module, or the module
+was not able to process the command, and sent back an error. The
+specific error is printed in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_COMMAND_SENT">
+<term>CMDCTL_COMMAND_SENT command '%1' to module '%2' was sent</term>
+<listitem><para>
+This debug message indicates that the given command has been sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_SUCH_USER">
+<term>CMDCTL_NO_SUCH_USER username not found in user database: %1</term>
+<listitem><para>
+A login attempt was made to b10-cmdctl, but the username was not known.
+Users can be added with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_NO_USER_ENTRIES_READ">
+<term>CMDCTL_NO_USER_ENTRIES_READ failed to read user information, all users will be denied</term>
+<listitem><para>
+The b10-cmdctl daemon was unable to find any user data in the user
+database file. Either it was unable to read the file (in which case
+this message follows a message CMDCTL_USER_DATABASE_READ_ERROR
+containing a specific error), or the file was empty. Users can be added
+with the tool b10-cmdctl-usermgr.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SEND_COMMAND">
+<term>CMDCTL_SEND_COMMAND sending command %1 to module %2</term>
+<listitem><para>
+This debug message indicates that the given command is being sent to
+the given module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_SSL_SETUP_FAILURE_USER_DENIED">
+<term>CMDCTL_SSL_SETUP_FAILURE_USER_DENIED failed to create an SSL connection (user denied): %1</term>
+<listitem><para>
+The user was denied because the SSL connection could not successfully
+be set up. The specific error is given in the log message. Possible
+causes may be that the ssl request itself was bad, or the local key or
+certificate file could not be read.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STARTED">
+<term>CMDCTL_STARTED cmdctl is listening for connections on %1:%2</term>
+<listitem><para>
+The cmdctl daemon has started and is now listening for connections.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
+<term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
 <listitem><para>
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
+There was a keyboard interrupt signal to stop the cmdctl daemon. The
+daemon will now shut down.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_UNKORIGIN">
-<term>ASIODNS_UNKORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)</term>
+<varlistentry id="CMDCTL_UNCAUGHT_EXCEPTION">
+<term>CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
 <listitem><para>
-This message should not appear and indicates an internal error if it does.
-Please enter a bug report.
+The b10-cmdctl daemon encountered an uncaught exception and
+will now shut down. This is indicative of a programming error and
+should not happen under normal circumstances. The exception message
+is printed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="ASIODNS_UNKRESULT">
-<term>ASIODNS_UNKRESULT unknown result (%1) when IOFetch::stop() was executed for I/O to %2(%3)</term>
+<varlistentry id="CMDCTL_USER_DATABASE_READ_ERROR">
+<term>CMDCTL_USER_DATABASE_READ_ERROR failed to read user database file %1: %2</term>
 <listitem><para>
-The termination method of the resolver's upstream fetch class was called with
-an unknown result code (which is given in the message).  This message should
-not appear and may indicate an internal error.  Please enter a bug report.
+The b10-cmdctl daemon was unable to read the user database file. The
+file may be unreadable for the daemon, or it may be corrupted. In the
+latter case, it can be recreated with b10-cmdctl-usermgr. The specific
+error is printed in the log message.
 </para></listitem>
 </varlistentry>
 
@@ -148,65 +1721,128 @@ The message itself is ignored by this module.
 <varlistentry id="CONFIG_CCSESSION_MSG_INTERNAL">
 <term>CONFIG_CCSESSION_MSG_INTERNAL error handling CC session message: %1</term>
 <listitem><para>
-There was an internal problem handling an incoming message on the
-command and control channel. An unexpected exception was thrown. This
-most likely points to an internal inconsistency in the module code. The
-exception message is appended to the log error, and the module will
-continue to run, but will not send back an answer.
+There was an internal problem handling an incoming message on the command
+and control channel. An unexpected exception was thrown, details of
+which are appended to the message. The module will continue to run,
+but will not send back an answer.
+</para><para>
+The most likely cause of this error is a programming error.  Please raise
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_GET_FAIL">
+<term>CONFIG_GET_FAIL error getting configuration from cfgmgr: %1</term>
+<listitem><para>
+The configuration manager returned an error when this module requested
+the configuration. The full error message answer from the configuration
+manager is appended to the log error. The most likely cause is that
+the module is of a different (command specification) version than the
+running configuration manager.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="CONFIG_FOPEN_ERR">
-<term>CONFIG_FOPEN_ERR error opening %1: %2</term>
+<varlistentry id="CONFIG_GET_FAILED">
+<term>CONFIG_GET_FAILED error getting configuration from cfgmgr: %1</term>
 <listitem><para>
-There was an error opening the given file.
+The configuration manager returned an error response when the module
+requested its configuration. The full error message answer from the
+configuration manager is appended to the log error.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="CONFIG_JSON_PARSE">
 <term>CONFIG_JSON_PARSE JSON parse error in %1: %2</term>
 <listitem><para>
-There was a parse error in the JSON file. The given file does not appear
+There was an error parsing the JSON file. The given file does not appear
 to be in valid JSON format. Please verify that the filename is correct
 and that the contents are valid JSON.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="CONFIG_MANAGER_CONFIG">
-<term>CONFIG_MANAGER_CONFIG error getting configuration from cfgmgr: %1</term>
+<varlistentry id="CONFIG_LOG_CONFIG_ERRORS">
+<term>CONFIG_LOG_CONFIG_ERRORS error(s) in logging configuration: %1</term>
 <listitem><para>
-The configuration manager returned an error when this module requested
-the configuration. The full error message answer from the configuration
-manager is appended to the log error. The most likely cause is that
-the module is of a different (command specification) version than the
-running configuration manager.
+There was a logging configuration update, but the internal validator
+for logging configuration found that it contained errors. The errors
+are shown, and the update is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_EXPLICIT">
+<term>CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1</term>
+<listitem><para>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program.  The logging
+configuration for the program will be updated with the information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_EXPLICIT">
+<term>CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1</term>
+<listitem><para>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger.  As this does not match the logger specification for the
+program, it has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_IGNORE_WILD">
+<term>CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry.  The configuration is ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_LOG_WILD_MATCH">
+<term>CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1</term>
+<listitem><para>
+This is a debug message.  When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="CONFIG_MOD_SPEC_FORMAT">
+<term>CONFIG_MOD_SPEC_FORMAT module specification error in %1: %2</term>
+<listitem><para>
+The given file does not appear to be a valid specification file: details
+are included in the message. Please verify that the filename is correct
+and that its contents are a valid BIND10 module specification.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="CONFIG_MANAGER_MOD_SPEC">
-<term>CONFIG_MANAGER_MOD_SPEC module specification not accepted by cfgmgr: %1</term>
+<varlistentry id="CONFIG_MOD_SPEC_REJECT">
+<term>CONFIG_MOD_SPEC_REJECT module specification rejected by cfgmgr: %1</term>
 <listitem><para>
-The module specification file for this module was rejected by the
-configuration manager. The full error message answer from the
-configuration manager is appended to the log error. The most likely
-cause is that the module is of a different (specification file) version
-than the running configuration manager.
+The specification file for this module was rejected by the configuration
+manager. The full error message answer from the configuration manager is
+appended to the log error. The most likely cause is that the module is of
+a different (specification file) version than the running configuration
+manager.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="CONFIG_MODULE_SPEC">
-<term>CONFIG_MODULE_SPEC module specification error in %1: %2</term>
+<varlistentry id="CONFIG_OPEN_FAIL">
+<term>CONFIG_OPEN_FAIL error opening %1: %2</term>
 <listitem><para>
-The given file does not appear to be a valid specification file. Please
-verify that the filename is correct and that its contents are a valid
-BIND10 module specification.
+There was an error opening the given file. The reason for the failure
+is included in the message.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_CREATE">
 <term>DATASRC_CACHE_CREATE creating the hotspot cache</term>
 <listitem><para>
-Debug information that the hotspot cache was created at startup.
+This is a debug message issued during startup when the hotspot cache
+is created.
 </para></listitem>
 </varlistentry>
 
@@ -218,39 +1854,37 @@ Debug information. The hotspot cache is being destroyed.
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_DISABLE">
-<term>DATASRC_CACHE_DISABLE disabling the cache</term>
+<term>DATASRC_CACHE_DISABLE disabling the hotspot cache</term>
 <listitem><para>
-The hotspot cache is disabled from now on. It is not going to store
-information or return anything.
+A debug message issued when the hotspot cache is disabled.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_ENABLE">
-<term>DATASRC_CACHE_ENABLE enabling the cache</term>
+<term>DATASRC_CACHE_ENABLE enabling the hotspot cache</term>
 <listitem><para>
-The hotspot cache is enabled from now on.
+A debug message issued when the hotspot cache is enabled.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_EXPIRED">
-<term>DATASRC_CACHE_EXPIRED the item '%1' is expired</term>
+<term>DATASRC_CACHE_EXPIRED item '%1' in the hotspot cache has expired</term>
 <listitem><para>
-Debug information. There was an attempt to look up an item in the hotspot
-cache. And the item was actually there, but it was too old, so it was removed
-instead and nothing is reported (the external behaviour is the same as with
-CACHE_NOT_FOUND).
+A debug message issued when a hotspot cache lookup located the item but it
+had expired.  The item was removed and the program proceeded as if the item
+had not been found.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_FOUND">
 <term>DATASRC_CACHE_FOUND the item '%1' was found</term>
 <listitem><para>
-Debug information. An item was successfully looked up in the hotspot cache.
+Debug information. An item was successfully located in the hotspot cache.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_FULL">
-<term>DATASRC_CACHE_FULL cache is full, dropping oldest</term>
+<term>DATASRC_CACHE_FULL hotspot cache is full, dropping oldest</term>
 <listitem><para>
 Debug information. After inserting an item into the hotspot cache, the
 maximum number of items was exceeded, so the least recently used item will
@@ -259,39 +1893,39 @@ be dropped. This should be directly followed by CACHE_REMOVE.
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_INSERT">
-<term>DATASRC_CACHE_INSERT inserting item '%1' into the cache</term>
+<term>DATASRC_CACHE_INSERT inserting item '%1' into the hotspot cache</term>
 <listitem><para>
-Debug information. It means a new item is being inserted into the hotspot
+A debug message indicating that a new item is being inserted into the hotspot
 cache.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_NOT_FOUND">
-<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found</term>
+<term>DATASRC_CACHE_NOT_FOUND the item '%1' was not found in the hotspot cache</term>
 <listitem><para>
-Debug information. It was attempted to look up an item in the hotspot cache,
-but it is not there.
+A debug message issued when the hotspot cache was searched for the
+specified item but it was not found.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_OLD_FOUND">
-<term>DATASRC_CACHE_OLD_FOUND older instance of cache item found, replacing</term>
+<term>DATASRC_CACHE_OLD_FOUND older instance of hotspot cache item '%1' found, replacing</term>
 <listitem><para>
 Debug information. While inserting an item into the hotspot cache, an older
-instance of an item with the same name was found. The old instance will be
-removed. This should be directly followed by CACHE_REMOVE.
+instance of an item with the same name was found; the old instance will be
+removed. This will be directly followed by CACHE_REMOVE.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_REMOVE">
-<term>DATASRC_CACHE_REMOVE removing '%1' from the cache</term>
+<term>DATASRC_CACHE_REMOVE removing '%1' from the hotspot cache</term>
 <listitem><para>
 Debug information. An item is being removed from the hotspot cache.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_CACHE_SLOTS">
-<term>DATASRC_CACHE_SLOTS setting the cache size to '%1', dropping '%2' items</term>
+<term>DATASRC_CACHE_SLOTS setting the hotspot cache size to '%1', dropping '%2' items</term>
 <listitem><para>
 The maximum allowed number of items of the hotspot cache is set to the given
 number. If there are too many, some of them will be dropped. The size of 0
@@ -299,11 +1933,268 @@ means no limit.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED">
+<term>DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</term>
+<listitem><para>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_RECORDS">
+<term>DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</term>
+<listitem><para>
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FIND_TTL_MISMATCH">
+<term>DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</term>
+<listitem><para>
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION">
+<term>DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DELEGATION_EXACT">
+<term>DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1</term>
+<listitem><para>
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_DNAME">
+<term>DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1</term>
+<listitem><para>
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL">
+<term>DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</term>
+<listitem><para>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXDOMAIN">
+<term>DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_NXRRSET">
+<term>DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_FOUND_RRSET">
+<term>DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2</term>
+<listitem><para>
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE">
+<term>DATASRC_DATABASE_ITERATE iterating zone %1</term>
+<listitem><para>
+The program is reading the whole zone, i.e. not searching for data, but
+going through each of the RRsets there.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_END">
+<term>DATASRC_DATABASE_ITERATE_END iterating zone finished</term>
+<listitem><para>
+While iterating through the zone, the program reached the end of the data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_NEXT">
+<term>DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</term>
+<listitem><para>
+While iterating through the zone, the program extracted the next RRset
+from it. The name and RRtype of the RRset are indicated in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_ITERATE_TTL_MISMATCH">
+<term>DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</term>
+<listitem><para>
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_END">
+<term>DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program successfully
+reached the end of the sequence of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_NEXT">
+<term>DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</term>
+<listitem><para>
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_START">
+<term>DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADR_BADDATA">
+<term>DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</term>
+<listitem><para>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
+<term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_CREATED">
+<term>DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</term>
+<listitem><para>
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_DESTROYED">
+<term>DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</term>
+<listitem><para>
+Debug information.  A zone updater object is destroyed, either after
+successfully making updates or after a failure while making updates to
+the shown zone on the shown backend database.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACK">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also indicate a bug in the application that forgot to
+commit the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL">
+<term>DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</term>
+<listitem><para>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation.  In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD">
+<term>DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</term>
+<listitem><para>
+The database doesn't contain a directly matching domain, but it does
+contain a wildcard one which is being used to synthesize the answer.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_NS">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</term>
+<listitem><para>
+The database was queried to provide glue data and it didn't find a
+direct match. It could create one from the given wildcard, but matching
+wildcards is forbidden under a zone cut, which was found. Therefore the
+delegation will be returned instead.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_CANCEL_SUB">
+<term>DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</term>
+<listitem><para>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like an empty non-terminal
+(actually, from the protocol point of view, it is an empty non-terminal,
+but the code discovers it differently).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_WILDCARD_EMPTY">
+<term>DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</term>
+<listitem><para>
+The given wildcard exists implicitly in the domain space, as an empty
+nonterminal (e.g. there's something like subdomain.*.example.org, so
+*.example.org exists implicitly, but is empty). This will produce
+NXRRSET, because the constructed domain is empty as well as the wildcard.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="DATASRC_DO_QUERY">
 <term>DATASRC_DO_QUERY handling query for '%1/%2'</term>
 <listitem><para>
-Debug information. We're processing some internal query for given name and
-type.
+A debug message indicating that a query for the given name and RR type is being
+processed.
 </para></listitem>
 </varlistentry>
 
@@ -317,8 +2208,9 @@ Debug information. An RRset is being added to the in-memory data source.
 <varlistentry id="DATASRC_MEM_ADD_WILDCARD">
 <term>DATASRC_MEM_ADD_WILDCARD adding wildcards for '%1'</term>
 <listitem><para>
-Debug information. Some special marks above each * in wildcard name are needed.
-They are being added now for this name.
+This is a debug message issued during the processing of a wildcard
+name. The internal domain name tree is scanned and some nodes are
+specially marked to allow the wildcard lookup to succeed.
 </para></listitem>
 </varlistentry>
 
@@ -349,7 +2241,7 @@ returning the CNAME instead.
 <term>DATASRC_MEM_CNAME_COEXIST can't add data to CNAME in domain '%1'</term>
 <listitem><para>
 This is the same problem as in MEM_CNAME_TO_NONEMPTY, but it happened the
-other way around -- adding some outher data to CNAME.
+other way around -- adding some other data to CNAME.
 </para></listitem>
 </varlistentry>
 
@@ -401,11 +2293,11 @@ Debug information. A DNAME was found instead of the requested information.
 </varlistentry>
 
 <varlistentry id="DATASRC_MEM_DNAME_NS">
-<term>DATASRC_MEM_DNAME_NS dNAME and NS can't coexist in non-apex domain '%1'</term>
+<term>DATASRC_MEM_DNAME_NS DNAME and NS can't coexist in non-apex domain '%1'</term>
 <listitem><para>
-It was requested for DNAME and NS records to be put into the same domain
-which is not the apex (the top of the zone). This is forbidden by RFC
-2672, section 3. This indicates a problem with provided data.
+A request was made for DNAME and NS records to be put into the same
+domain which is not the apex (the top of the zone). This is forbidden
+by RFC 2672 (section 3) and indicates a problem with provided data.
 </para></listitem>
 </varlistentry>
 
@@ -457,8 +2349,8 @@ Debug information. The content of master file is being loaded into the memory.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="DATASRC_MEM_NOTFOUND">
-<term>DATASRC_MEM_NOTFOUND requested domain '%1' not found</term>
+<varlistentry id="DATASRC_MEM_NOT_FOUND">
+<term>DATASRC_MEM_NOT_FOUND requested domain '%1' not found</term>
 <listitem><para>
 Debug information. The requested domain does not exist.
 </para></listitem>
@@ -544,7 +2436,7 @@ behaviour is specified by RFC 1034, section 4.3.3
 </varlistentry>
 
 <varlistentry id="DATASRC_MEM_WILDCARD_DNAME">
-<term>DATASRC_MEM_WILDCARD_DNAME dNAME record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_DNAME DNAME record in wildcard domain '%1'</term>
 <listitem><para>
 The software refuses to load DNAME records into a wildcard domain.  It isn't
 explicitly forbidden, but the protocol is ambiguous about how this should
@@ -554,7 +2446,7 @@ different tools.
 </varlistentry>
 
 <varlistentry id="DATASRC_MEM_WILDCARD_NS">
-<term>DATASRC_MEM_WILDCARD_NS nS record in wildcard domain '%1'</term>
+<term>DATASRC_MEM_WILDCARD_NS NS record in wildcard domain '%1'</term>
 <listitem><para>
 The software refuses to load NS records into a wildcard domain.  It isn't
 explicitly forbidden, but the protocol is ambiguous about how this should
@@ -566,15 +2458,15 @@ different tools.
 <varlistentry id="DATASRC_META_ADD">
 <term>DATASRC_META_ADD adding a data source into meta data source</term>
 <listitem><para>
-Debug information. Yet another data source is being added into the meta data
-source. (probably at startup or reconfiguration)
+This is a debug message issued during startup or reconfiguration.
+Another data source is being added into the meta data source.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_META_ADD_CLASS_MISMATCH">
 <term>DATASRC_META_ADD_CLASS_MISMATCH mismatch between classes '%1' and '%2'</term>
 <listitem><para>
-It was attempted to add a data source into a meta data source. But their
+It was attempted to add a data source into a meta data source, but their
 classes do not match.
 </para></listitem>
 </varlistentry>
@@ -634,7 +2526,7 @@ information for it.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_CACHED">
-<term>DATASRC_QUERY_CACHED data for %1/%2 found in cache</term>
+<term>DATASRC_QUERY_CACHED data for %1/%2 found in hotspot cache</term>
 <listitem><para>
 Debug information. The requested data were found in the hotspot cache, so
 no query is sent to the real data source.
@@ -642,7 +2534,7 @@ no query is sent to the real data source.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_CHECK_CACHE">
-<term>DATASRC_QUERY_CHECK_CACHE checking cache for '%1/%2'</term>
+<term>DATASRC_QUERY_CHECK_CACHE checking hotspot cache for '%1/%2'</term>
 <listitem><para>
 Debug information. While processing a query, lookup to the hotspot cache
 is being made.
@@ -666,12 +2558,11 @@ way down to the given domain.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_EMPTY_CNAME">
-<term>DATASRC_QUERY_EMPTY_CNAME cNAME at '%1' is empty</term>
+<term>DATASRC_QUERY_EMPTY_CNAME CNAME at '%1' is empty</term>
 <listitem><para>
-There was an CNAME and it was being followed. But it contains no records,
-so there's nowhere to go. There will be no answer. This indicates a problem
-with supplied data.
-We tried to follow
+A CNAME chain was being followed and an entry was found that pointed
+to a domain name that had no RRsets associated with it. As a result,
+the query cannot be answered. This indicates a problem with supplied data.
 </para></listitem>
 </varlistentry>
 
@@ -687,15 +2578,15 @@ DNAME is empty (it has no records). This indicates problem with supplied data.
 <term>DATASRC_QUERY_FAIL query failed</term>
 <listitem><para>
 Some subtask of query processing failed. The reason should have been reported
-already. We are returning SERVFAIL.
+already and a SERVFAIL will be returned to the querying system.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_FOLLOW_CNAME">
 <term>DATASRC_QUERY_FOLLOW_CNAME following CNAME at '%1'</term>
 <listitem><para>
-Debug information. The domain is a CNAME (or a DNAME and we created a CNAME
-for it already), so it's being followed.
+Debug information. The domain is a CNAME (or a DNAME and a CNAME for it
+has already been created) and the search is following this chain.
 </para></listitem>
 </varlistentry>
 
@@ -744,14 +2635,14 @@ Debug information. The last DO_QUERY is an auth query.
 <varlistentry id="DATASRC_QUERY_IS_GLUE">
 <term>DATASRC_QUERY_IS_GLUE glue query (%1/%2)</term>
 <listitem><para>
-Debug information. The last DO_QUERY is query for glue addresses.
+Debug information. The last DO_QUERY is a query for glue addresses.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_IS_NOGLUE">
 <term>DATASRC_QUERY_IS_NOGLUE query for non-glue addresses (%1/%2)</term>
 <listitem><para>
-Debug information. The last DO_QUERY is query for addresses that are not
+Debug information. The last DO_QUERY is a query for addresses that are not
 glue.
 </para></listitem>
 </varlistentry>
@@ -759,7 +2650,7 @@ glue.
 <varlistentry id="DATASRC_QUERY_IS_REF">
 <term>DATASRC_QUERY_IS_REF query for referral (%1/%2)</term>
 <listitem><para>
-Debug information. The last DO_QUERY is query for referral information.
+Debug information. The last DO_QUERY is a query for referral information.
 </para></listitem>
 </varlistentry>
 
@@ -806,7 +2697,7 @@ error already.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_AUTH">
-<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_AUTH ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
 <listitem><para>
 Debug information. The hotspot cache is ignored for authoritative ANY queries
 for consistency reasons.
@@ -814,7 +2705,7 @@ for consistency reasons.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_NO_CACHE_ANY_SIMPLE">
-<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring cache for ANY query (%1/%2 in %3 class)</term>
+<term>DATASRC_QUERY_NO_CACHE_ANY_SIMPLE ignoring hotspot cache for ANY query (%1/%2 in %3 class)</term>
 <listitem><para>
 Debug information. The hotspot cache is ignored for ANY queries for consistency
 reasons.
@@ -852,8 +2743,8 @@ Debug information. A sure query is being processed now.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="DATASRC_QUERY_PROVENX_FAIL">
-<term>DATASRC_QUERY_PROVENX_FAIL unable to prove nonexistence of '%1'</term>
+<varlistentry id="DATASRC_QUERY_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_PROVE_NX_FAIL unable to prove nonexistence of '%1'</term>
 <listitem><para>
 The user wants DNSSEC and we discovered the entity doesn't exist (either
 domain or the record). But there was an error getting NSEC/NSEC3 record
@@ -890,9 +2781,9 @@ error already.
 <varlistentry id="DATASRC_QUERY_SYNTH_CNAME">
 <term>DATASRC_QUERY_SYNTH_CNAME synthesizing CNAME from DNAME on '%1'</term>
 <listitem><para>
-Debug information. While answering a query, a DNAME was met. The DNAME itself
-will be returned, but along with it a CNAME for clients which don't understand
-DNAMEs will be synthesized.
+This is a debug message. While answering a query, a DNAME was encountered. The
+DNAME itself will be returned, along with a synthesized CNAME for clients that
+do not understand the DNAME RR.
 </para></listitem>
 </varlistentry>
 
@@ -905,7 +2796,7 @@ already. The code is 1 for error, 2 for not implemented.
 </varlistentry>
 
 <varlistentry id="DATASRC_QUERY_TOO_MANY_CNAMES">
-<term>DATASRC_QUERY_TOO_MANY_CNAMES cNAME chain limit exceeded at '%1'</term>
+<term>DATASRC_QUERY_TOO_MANY_CNAMES CNAME chain limit exceeded at '%1'</term>
 <listitem><para>
 A CNAME led to another CNAME and it led to another, and so on. After 16
 CNAMEs, the software gave up. Long CNAME chains are discouraged, and this
@@ -938,8 +2829,8 @@ exact kind was hopefully already reported.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="DATASRC_QUERY_WILDCARD_PROVENX_FAIL">
-<term>DATASRC_QUERY_WILDCARD_PROVENX_FAIL unable to prove nonexistence of '%1' (%2)</term>
+<varlistentry id="DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL">
+<term>DATASRC_QUERY_WILDCARD_PROVE_NX_FAIL unable to prove nonexistence of '%1' (%2)</term>
 <listitem><para>
 While processing a wildcard, it wasn't possible to prove nonexistence of the
 given domain or record.  The code is 1 for error and 2 for not implemented.
@@ -961,32 +2852,53 @@ Debug information. The SQLite data source is closing the database file.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_SQLITE_CONNCLOSE">
+<term>DATASRC_SQLITE_CONNCLOSE Closing sqlite database</term>
+<listitem><para>
+The database file is no longer needed and is being closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_SQLITE_CONNOPEN">
+<term>DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'</term>
+<listitem><para>
+The database file is being opened so it can start providing data.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="DATASRC_SQLITE_CREATE">
-<term>DATASRC_SQLITE_CREATE sQLite data source created</term>
+<term>DATASRC_SQLITE_CREATE SQLite data source created</term>
 <listitem><para>
 Debug information. An instance of SQLite data source is being created.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_SQLITE_DESTROY">
-<term>DATASRC_SQLITE_DESTROY sQLite data source destroyed</term>
+<term>DATASRC_SQLITE_DESTROY SQLite data source destroyed</term>
 <listitem><para>
 Debug information. An instance of SQLite data source is being destroyed.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_SQLITE_DROPCONN">
+<term>DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized</term>
+<listitem><para>
+The object around a database connection is being destroyed.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="DATASRC_SQLITE_ENCLOSURE">
 <term>DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'</term>
 <listitem><para>
-Debug information. The SQLite data source is trying to identify, which zone
+Debug information. The SQLite data source is trying to identify which zone
 should hold this domain.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOTFOUND">
-<term>DATASRC_SQLITE_ENCLOSURE_NOTFOUND no zone contains it</term>
+<varlistentry id="DATASRC_SQLITE_ENCLOSURE_NOT_FOUND">
+<term>DATASRC_SQLITE_ENCLOSURE_NOT_FOUND no zone contains '%1'</term>
 <listitem><para>
-Debug information. The last SQLITE_ENCLOSURE query was unsuccessful, there's
+Debug information. The last SQLITE_ENCLOSURE query was unsuccessful; there's
 no such zone in our data.
 </para></listitem>
 </varlistentry>
@@ -1050,7 +2962,7 @@ a referral and where it goes.
 <varlistentry id="DATASRC_SQLITE_FINDREF_BAD_CLASS">
 <term>DATASRC_SQLITE_FINDREF_BAD_CLASS class mismatch looking for referral ('%1' and '%2')</term>
 <listitem><para>
-The SQLite data source was trying to identify, if there's a referral. But
+The SQLite data source was trying to identify if there's a referral. But
 it contains different class than the query was for.
 </para></listitem>
 </varlistentry>
@@ -1079,6 +2991,13 @@ But it doesn't contain that zone.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_SQLITE_NEWCONN">
+<term>DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized</term>
+<listitem><para>
+A wrapper object to hold the database connection is being initialized.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="DATASRC_SQLITE_OPEN">
 <term>DATASRC_SQLITE_OPEN opening SQLite database '%1'</term>
 <listitem><para>
@@ -1090,15 +3009,22 @@ the provided file.
 <varlistentry id="DATASRC_SQLITE_PREVIOUS">
 <term>DATASRC_SQLITE_PREVIOUS looking for name previous to '%1'</term>
 <listitem><para>
-Debug information. We're trying to look up name preceding the supplied one.
+This is a debug message.  The name given was not found, so the program
+is searching for the next name higher up the hierarchy (e.g. if
+www.example.com were queried for and not found, the software searches
+for the "previous" name, example.com).
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="DATASRC_SQLITE_PREVIOUS_NO_ZONE">
 <term>DATASRC_SQLITE_PREVIOUS_NO_ZONE no zone containing '%1'</term>
 <listitem><para>
-The SQLite data source tried to identify name preceding this one. But this
-one is not contained in any zone in the data source.
+The name given was not found, so the program is searching for the next
+name higher up the hierarchy (e.g. if www.example.com were queried
+for and not found, the software searches for the "previous" name,
+example.com). However, this name is not contained in any zone in the
+data source. This is an error since it indicates a problem in the earlier
+processing of the query.
 </para></listitem>
 </varlistentry>
 
@@ -1111,11 +3037,11 @@ no data, but it will be ready for use.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="DATASRC_STATIC_BAD_CLASS">
-<term>DATASRC_STATIC_BAD_CLASS static data source can handle CH only</term>
+<varlistentry id="DATASRC_STATIC_CLASS_NOT_CH">
+<term>DATASRC_STATIC_CLASS_NOT_CH static data source can handle CH class only</term>
 <listitem><para>
-For some reason, someone asked the static data source a query that is not in
-the CH class.
+An error message indicating that a query requesting a RR for a class other
+that CH was sent to the static data source (which only handles CH queries).
 </para></listitem>
 </varlistentry>
 
@@ -1143,294 +3069,496 @@ generated.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="LOGIMPL_ABOVEDBGMAX">
-<term>LOGIMPL_ABOVEDBGMAX debug level of %1 is too high and will be set to the maximum of %2</term>
+<varlistentry id="LIBXFRIN_DIFFERENT_TTL">
+<term>LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</term>
+<listitem><para>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten by the earlier one in the sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LIBXFRIN_NO_JOURNAL">
+<term>LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</term>
+<listitem><para>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
+<term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
 <listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is above the maximum allowed value and has
-been reduced to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is above the maximum allowed value and has
+been reduced to that value.  The appearance of this message may indicate
+a programming error - please submit a bug report.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="LOGIMPL_BADDEBUG">
-<term>LOGIMPL_BADDEBUG debug string is '%1': must be of the form DEBUGn</term>
+<varlistentry id="LOGIMPL_BAD_DEBUG_STRING">
+<term>LOGIMPL_BAD_DEBUG_STRING debug string '%1' has invalid format</term>
 <listitem><para>
-The string indicating the extended logging level (used by the underlying
-logger implementation code) is not of the stated form.  In particular,
-it starts DEBUG but does not end with an integer.
+A message from the interface to the underlying logger implementation
+reporting that an internally-created string used to set the debug level
+is not of the correct format (it should be of the form DEBUGn, where n
+is an integer, e.g. DEBUG22).  The appearance of this message indicates
+a programming error - please submit a bug report.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="LOGIMPL_BELOWDBGMIN">
-<term>LOGIMPL_BELOWDBGMIN debug level of %1 is too low and will be set to the minimum of %2</term>
+<varlistentry id="LOGIMPL_BELOW_MIN_DEBUG">
+<term>LOGIMPL_BELOW_MIN_DEBUG debug level of %1 is too low and will be set to the minimum of %2</term>
 <listitem><para>
-A message from the underlying logger implementation code, the debug level
-(as set by the string DEBGUGn) is below the minimum allowed value and has
-been increased to that value.
+A message from the interface to the underlying logger implementation reporting
+that the debug level (as set by an internally-created string DEBUGn, where n
+is an integer, e.g. DEBUG22) is below the minimum allowed value and has
+been increased to that value.  The appearance of this message may indicate
+a programming error - please submit a bug report.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_BADDESTINATION">
-<term>MSG_BADDESTINATION unrecognized log destination: %1</term>
+<varlistentry id="LOG_BAD_DESTINATION">
+<term>LOG_BAD_DESTINATION unrecognized log destination: %1</term>
 <listitem><para>
 A logger destination value was given that was not recognized. The
 destination should be one of "console", "file", or "syslog".
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_BADSEVERITY">
-<term>MSG_BADSEVERITY unrecognized log severity: %1</term>
+<varlistentry id="LOG_BAD_SEVERITY">
+<term>LOG_BAD_SEVERITY unrecognized log severity: %1</term>
 <listitem><para>
 A logger severity value was given that was not recognized. The severity
-should be one of "DEBUG", "INFO", "WARN", "ERROR", or "FATAL".
+should be one of "DEBUG", "INFO", "WARN", "ERROR", "FATAL" or "NONE".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_BAD_STREAM">
+<term>LOG_BAD_STREAM bad log console output stream: %1</term>
+<listitem><para>
+Logging has been configured so that output is written to the terminal
+(console) but the stream on which it is to be written is not recognised.
+Allowed values are "stdout" and "stderr".
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_DUPLICATE_MESSAGE_ID">
+<term>LOG_DUPLICATE_MESSAGE_ID duplicate message ID (%1) in compiled code</term>
+<listitem><para>
+During start-up, BIND 10 detected that the given message identification
+had been defined multiple times in the BIND 10 code.  This indicates a
+programming error; please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_DUPLICATE_NAMESPACE">
+<term>LOG_DUPLICATE_NAMESPACE line %1: duplicate $NAMESPACE directive found</term>
+<listitem><para>
+When reading a message file, more than one $NAMESPACE directive was found.
+(This directive is used to set a C++ namespace when generating header
+files during software development.)  Such a condition is regarded as an
+error and the read will be abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INPUT_OPEN_FAIL">
+<term>LOG_INPUT_OPEN_FAIL unable to open message file %1 for input: %2</term>
+<listitem><para>
+The program was not able to open the specified input message file for
+the reason given.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_INVALID_MESSAGE_ID">
+<term>LOG_INVALID_MESSAGE_ID line %1: invalid message identification '%2'</term>
+<listitem><para>
+An invalid message identification (ID) has been found during the read of
+a message file.  Message IDs should comprise only alphanumeric characters
+and the underscore, and should not start with a digit.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NAMESPACE_EXTRA_ARGS">
+<term>LOG_NAMESPACE_EXTRA_ARGS line %1: $NAMESPACE directive has too many arguments</term>
+<listitem><para>
+The $NAMESPACE directive in a message file takes a single argument, a
+namespace in which all the generated symbol names are placed.  This error
+is generated when the compiler finds a $NAMESPACE directive with more
+than one argument.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_BADSTREAM">
-<term>MSG_BADSTREAM bad log console output stream: %1</term>
+<varlistentry id="LOG_NAMESPACE_INVALID_ARG">
+<term>LOG_NAMESPACE_INVALID_ARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
 <listitem><para>
-A log console output stream was given that was not recognized. The
-output stream should be one of "stdout", or "stderr"
+The $NAMESPACE argument in a message file should be a valid C++ namespace.
+This message is output if the simple check on the syntax of the string
+carried out by the reader fails.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_DUPLNS">
-<term>MSG_DUPLNS line %1: duplicate $NAMESPACE directive found</term>
+<varlistentry id="LOG_NAMESPACE_NO_ARGS">
+<term>LOG_NAMESPACE_NO_ARGS line %1: no arguments were given to the $NAMESPACE directive</term>
 <listitem><para>
-When reading a message file, more than one $NAMESPACE directive was found.  In
-this version of the code, such a condition is regarded as an error and the
-read will be abandoned.
+The $NAMESPACE directive in a message file takes a single argument,
+a C++ namespace in which all the generated symbol names are placed.
+This error is generated when the compiler finds a $NAMESPACE directive
+with no arguments.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_DUPMSGID">
-<term>MSG_DUPMSGID duplicate message ID (%1) in compiled code</term>
+<varlistentry id="LOG_NO_MESSAGE_ID">
+<term>LOG_NO_MESSAGE_ID line %1: message definition line found without a message ID</term>
 <listitem><para>
-Indicative of a programming error, when it started up, BIND10 detected that
-the given message ID had been registered by one or more modules.  (All message
-IDs should be unique throughout BIND10.)  This has no impact on the operation
-of the server other that erroneous messages may be logged.  (When BIND10 loads
-the message IDs (and their associated text), if a duplicate ID is found it is
-discarded.  However, when the module that supplied the duplicate ID logs that
-particular message, the text supplied by the module that added the original
-ID will be output - something that may bear no relation to the condition being
-logged.
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message.  This error indicates the message compiler found a line in
+the message file comprising just the "%" and nothing else.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_IDNOTFND">
-<term>MSG_IDNOTFND could not replace message text for '%1': no such message</term>
+<varlistentry id="LOG_NO_MESSAGE_TEXT">
+<term>LOG_NO_MESSAGE_TEXT line %1: line found containing a message ID ('%2') and no text</term>
+<listitem><para>
+Within a message file, messages are defined by lines starting with a "%".
+The rest of the line should comprise the message ID and text describing
+the message.  This error indicates the message compiler found a line
+in the message file comprising just the "%" and message identification,
+but no text.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="LOG_NO_SUCH_MESSAGE">
+<term>LOG_NO_SUCH_MESSAGE could not replace message text for '%1': no such message</term>
 <listitem><para>
 During start-up a local message file was read.  A line with the listed
-message identification was found in the file, but the identification is not
-one contained in the compiled-in message dictionary.  Either the message
-identification has been mis-spelled in the file, or the local file was used
-for an earlier version of the software and the message with that
-identification has been removed.
+message identification was found in the file, but the identification is
+not one contained in the compiled-in message dictionary.  This message
+may appear a number of times in the file, once for every such unknown
+message identification.
+</para><para>
+There may be several reasons why this message may appear:
+</para><para>
+- The message ID has been mis-spelled in the local message file.
 </para><para>
-This message may appear a number of times in the file, once for every such
-unknown message identification.
+- The program outputting the message may not use that particular message
+(e.g. it originates in a module not used by the program.)
+</para><para>
+- The local file was written for an earlier version of the BIND 10 software
+and the later version no longer generates that message.
+</para><para>
+Whatever the reason, there is no impact on the operation of BIND 10.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_INVMSGID">
-<term>MSG_INVMSGID line %1: invalid message identification '%2'</term>
+<varlistentry id="LOG_OPEN_OUTPUT_FAIL">
+<term>LOG_OPEN_OUTPUT_FAIL unable to open %1 for output: %2</term>
 <listitem><para>
-The concatenation of the prefix and the message identification is used as
-a symbol in the C++ module; as such it may only contain
+Originating within the logging code, the program was not able to open
+the specified output file for the reason given.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_NOMSGID">
-<term>MSG_NOMSGID line %1: message definition line found without a message ID</term>
+<varlistentry id="LOG_PREFIX_EXTRA_ARGS">
+<term>LOG_PREFIX_EXTRA_ARGS line %1: $PREFIX directive has too many arguments</term>
 <listitem><para>
-Message definition lines are lines starting with a "%".  The rest of the line
-should comprise the message ID and text describing the message.  This error
-indicates the message compiler found a line in the message file comprising
-just the "%" and nothing else.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+This error is generated when the compiler finds a $PREFIX directive with
+more than one argument.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_NOMSGTXT">
-<term>MSG_NOMSGTXT line %1: line found containing a message ID ('%2') and no text</term>
+<varlistentry id="LOG_PREFIX_INVALID_ARG">
+<term>LOG_PREFIX_INVALID_ARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
 <listitem><para>
-Message definition lines are lines starting with a "%".  The rest of the line
-should comprise the message ID and text describing the message.  This error
-is generated when a line is found in the message file that contains the
-leading "%" and the message identification but no text.
+Within a message file, the $PREFIX directive takes a single argument,
+a prefix to be added to the symbol names when a C++ file is created.
+As such, it must adhere to restrictions on C++ symbol names (e.g. may
+only contain alphanumeric characters or underscores, and may not start
+with a digit).  A $PREFIX directive was found with an argument (given
+in the message) that violates those restrictions.
+</para><para>
+Note: the $PREFIX directive is deprecated and will be removed in a future
+version of BIND 10.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_NSEXTRARG">
-<term>MSG_NSEXTRARG line %1: $NAMESPACE directive has too many arguments</term>
+<varlistentry id="LOG_READING_LOCAL_FILE">
+<term>LOG_READING_LOCAL_FILE reading local message file %1</term>
 <listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed.  This error is generated when the
-compiler finds a $NAMESPACE directive with more than one argument.
+This is an informational message output by BIND 10 when it starts to read
+a local message file.  (A local message file may replace the text of
+one or more messages; the ID of the message will not be changed though.)
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_NSINVARG">
-<term>MSG_NSINVARG line %1: $NAMESPACE directive has an invalid argument ('%2')</term>
+<varlistentry id="LOG_READ_ERROR">
+<term>LOG_READ_ERROR error reading from message file %1: %2</term>
 <listitem><para>
-The $NAMESPACE argument should be a valid C++ namespace.  The reader does a
-cursory check on its validity, checking that the characters in the namespace
-are correct.  The error is generated when the reader finds an invalid
-character. (Valid are alphanumeric characters, underscores and colons.)
+The specified error was encountered reading from the named message file.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_NSNOARG">
-<term>MSG_NSNOARG line %1: no arguments were given to the $NAMESPACE directive</term>
+<varlistentry id="LOG_UNRECOGNISED_DIRECTIVE">
+<term>LOG_UNRECOGNISED_DIRECTIVE line %1: unrecognised directive '%2'</term>
 <listitem><para>
-The $NAMESPACE directive takes a single argument, a namespace in which all the
-generated symbol names are placed.  This error is generated when the
-compiler finds a $NAMESPACE directive with no arguments.
+Within a message file, a line starting with a dollar symbol was found
+(indicating the presence of a directive) but the first word on the line
+(shown in the message) was not recognised.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_OPENIN">
-<term>MSG_OPENIN unable to open message file %1 for input: %2</term>
+<varlistentry id="LOG_WRITE_ERROR">
+<term>LOG_WRITE_ERROR error writing to %1: %2</term>
 <listitem><para>
-The program was not able to open the specified input message file for the
-reason given.
+The specified error was encountered by the message compiler when writing
+to the named output file.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_OPENOUT">
-<term>MSG_OPENOUT unable to open %1 for output: %2</term>
+<varlistentry id="NOTIFY_OUT_DATASRC_ACCESS_FAILURE">
+<term>NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</term>
 <listitem><para>
-The program was not able to open the specified output file for the reason
-given.
+notify_out failed to get access to one of configured data sources.
+Detailed error is shown in the log message.  This can be either a
+configuration error or installation setup failure.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_PRFEXTRARG">
-<term>MSG_PRFEXTRARG line %1: $PREFIX directive has too many arguments</term>
+<varlistentry id="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND">
+<term>NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</term>
 <listitem><para>
-The $PREFIX directive takes a single argument, a prefix to be added to the
-symbol names when a C++ .h file is created.  This error is generated when the
-compiler finds a $PREFIX directive with more than one argument.
+notify_out attempted to get slave information of a zone but the zone
+isn't found in the expected data source.  This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process.  So this means some critical inconsistency in the data
+source or software bug.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_PRFINVARG">
-<term>MSG_PRFINVARG line %1: $PREFIX directive has an invalid argument ('%2')</term>
+<varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
+<term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
 <listitem><para>
-The $PREFIX argument is used in a symbol name in a C++ header file.  As such,
-it must adhere to restrictions on C++ symbol names (e.g. may only contain
-alphanumeric characters or underscores, and may nor start with a digit).
-A $PREFIX directive was found with an argument (given in the message) that
-violates those restictions.
+The notify_out library tried to send a notify message to the given
+address, but it appears to be an invalid address. The configuration
+for secondary nameservers might contain a typographic error, or a
+different BIND 10 module has forgotten to validate its data before
+sending this module a notify command. As such, this should normally
+not happen, and points to an oversight in a different module.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_RDLOCMES">
-<term>MSG_RDLOCMES reading local message file %1</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_OPCODE">
+<term>NOTIFY_OUT_REPLY_BAD_OPCODE bad opcode in notify reply from %1#%2: %3</term>
 <listitem><para>
-This is an informational message output by BIND10 when it starts to read a
-local message file.  (A local message file may replace the text of one of more
-messages; the ID of the message will not be changed though.)
+The notify_out library sent a notify message to the nameserver at
+the given address, but the response did not have the opcode set to
+NOTIFY. The opcode in the response is printed. Since there was a
+response, no more notifies will be sent to this server for this
+notification event.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_READERR">
-<term>MSG_READERR error reading from message file %1: %2</term>
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QID">
+<term>NOTIFY_OUT_REPLY_BAD_QID bad QID in notify reply from %1#%2: got %3, should be %4</term>
 <listitem><para>
-The specified error was encountered reading from the named message file.
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query id in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_BAD_QUERY_NAME">
+<term>NOTIFY_OUT_REPLY_BAD_QUERY_NAME bad query name in notify reply from %1#%2: got %3, should be %4</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at
+the given address, but the query name in the response does not match
+the one we sent. Since there was a response, no more notifies will
+be sent to this server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_QR_NOT_SET">
+<term>NOTIFY_OUT_REPLY_QR_NOT_SET QR flags set to 0 in reply to notify from %1#%2</term>
+<listitem><para>
+The notify_out library sent a notify message to the nameserver at the
+given address, but the reply did not have the QR bit set to one.
+Since there was a response, no more notifies will be sent to this
+server for this notification event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION">
+<term>NOTIFY_OUT_REPLY_UNCAUGHT_EXCEPTION uncaught exception: %1</term>
+<listitem><para>
+There was an uncaught exception in the handling of a notify reply
+message, either in the message parser, or while trying to extract data
+from the parsed message. The error is printed, and notify_out will
+treat the response as a bad message, but this does point to a
+programming error, since all exceptions should have been caught
+explicitly. Please file a bug report. Since there was a response,
+no more notifies will be sent to this server for this notification
+event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_RETRY_EXCEEDED">
+<term>NOTIFY_OUT_RETRY_EXCEEDED notify to %1#%2: number of retries (%3) exceeded</term>
+<listitem><para>
+The maximum number of retries for the notify target has been exceeded.
+Either the address of the secondary nameserver is wrong, or it is not
+responding.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SENDING_NOTIFY">
+<term>NOTIFY_OUT_SENDING_NOTIFY sending notify to %1#%2</term>
+<listitem><para>
+A notify message is sent to the secondary nameserver at the given
+address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_SOCKET_ERROR">
+<term>NOTIFY_OUT_SOCKET_ERROR socket error sending notify to %1#%2: %3</term>
+<listitem><para>
+There was a network error while trying to send a notify message to
+the given address. The address might be unreachable. The socket
+error is printed and should provide more information.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_UNRECDIR">
-<term>MSG_UNRECDIR line %1: unrecognised directive '%2'</term>
+<varlistentry id="NOTIFY_OUT_SOCKET_RECV_ERROR">
+<term>NOTIFY_OUT_SOCKET_RECV_ERROR socket error reading notify reply from %1#%2: %3</term>
 <listitem><para>
-A line starting with a dollar symbol was found, but the first word on the line
-(shown in the message) was not a recognised message compiler directive.
+There was a network error while trying to read a notify reply
+message from the given address. The socket error is printed and should
+provide more information.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="MSG_WRITERR">
-<term>MSG_WRITERR error writing to %1: %2</term>
+<varlistentry id="NOTIFY_OUT_TIMEOUT">
+<term>NOTIFY_OUT_TIMEOUT retry notify to %1#%2</term>
 <listitem><para>
-The specified error was encountered by the message compiler when writing to
-the named output file.
+The notify message to the given address (noted as address#port) has
+timed out, and the message will be resent until the max retry limit
+is reached.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_INVRESPSTR">
-<term>NSAS_INVRESPSTR queried for %1 but got invalid response</term>
+<varlistentry id="NOTIFY_OUT_ZONE_BAD_SOA">
+<term>NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</term>
 <listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver.  The NSAS made a query for a RR for the
-specified nameserver but received an invalid response.  Either the success
-function was called without a DNS message or the message was invalid on some
-way. (In the latter case, the error should have been picked up elsewhere in
-the processing logic, hence the raising of the error here.)
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  Notify message won't
+be sent to such a zone.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_INVRESPTC">
-<term>NSAS_INVRESPTC queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<varlistentry id="NOTIFY_OUT_ZONE_NO_NS">
+<term>NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</term>
 <listitem><para>
-This message indicates an internal error in the nameserver address store
-component (NSAS) of the resolver.  The NSAS made a query for the given RR
-type and class, but instead received an answer with the given type and class.
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  Notify message won't be sent to such a zone.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_LOOKUPCANCEL">
-<term>NSAS_LOOKUPCANCEL lookup for zone %1 has been cancelled</term>
+<varlistentry id="NSAS_FIND_NS_ADDRESS">
+<term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
 <listitem><para>
-A debug message, this is output when a NSAS (nameserver address store -
-part of the resolver) lookup for a zone has been cancelled.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) is making a callback into the resolver to retrieve the
+address records for the specified nameserver.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_LOOKUPZONE">
-<term>NSAS_LOOKUPZONE searching NSAS for nameservers for zone %1</term>
+<varlistentry id="NSAS_FOUND_ADDRESS">
+<term>NSAS_FOUND_ADDRESS found address %1 for %2</term>
 <listitem><para>
-A debug message, this is output when a call is made to the nameserver address
-store (part of the resolver) to obtain the nameservers for the specified zone.
+A debug message issued when the NSAS (nameserver address store - part
+of the resolver) has retrieved the given address for the specified
+nameserver through an external query.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_NSADDR">
-<term>NSAS_NSADDR asking resolver to obtain A and AAAA records for %1</term>
+<varlistentry id="NSAS_INVALID_RESPONSE">
+<term>NSAS_INVALID_RESPONSE queried for %1 but got invalid response</term>
 <listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver) is
-making a callback into the resolver to retrieve the address records for the
-specified nameserver.
+The NSAS (nameserver address store - part of the resolver) made a query
+for a RR for the specified nameserver but received an invalid response.
+Either the success function was called without a DNS message or the
+message was invalid in some way. (In the latter case, the error should
+have been picked up elsewhere in the processing logic, hence the raising
+of the error here.)
+</para><para>
+This message indicates an internal error in the NSAS.  Please raise a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_LOOKUP_CANCEL">
+<term>NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled</term>
+<listitem><para>
+A debug message issued when an NSAS (nameserver address store - part of
+the resolver) lookup for a zone has been canceled.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_NSLKUPFAIL">
-<term>NSAS_NSLKUPFAIL failed to lookup any %1 for %2</term>
+<varlistentry id="NSAS_NS_LOOKUP_FAIL">
+<term>NSAS_NS_LOOKUP_FAIL failed to lookup any %1 for %2</term>
 <listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has been unable to retrieve the specified resource record for the specified
-nameserver.  This is not necessarily a problem - the nameserver may be
-unreachable, in which case the NSAS will try other nameservers in the zone.
+A debug message issued when the NSAS (nameserver address store - part of
+the resolver) has been unable to retrieve the specified resource record
+for the specified nameserver.  This is not necessarily a problem - the
+nameserver may be unreachable, in which case the NSAS will try other
+nameservers in the zone.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_NSLKUPSUCC">
-<term>NSAS_NSLKUPSUCC found address %1 for %2</term>
+<varlistentry id="NSAS_SEARCH_ZONE_NS">
+<term>NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1</term>
 <listitem><para>
-A debug message, the NSAS (nameserver address store - part of the resolver)
-has retrieved the given address for the specified nameserver through an
-external query.
+A debug message output when a call is made to the NSAS (nameserver
+address store - part of the resolver) to obtain the nameservers for
+the specified zone.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="NSAS_SETRTT">
-<term>NSAS_SETRTT reporting RTT for %1 as %2; new value is now %3</term>
+<varlistentry id="NSAS_UPDATE_RTT">
+<term>NSAS_UPDATE_RTT update RTT for %1: was %2 ms, is now %3 ms</term>
 <listitem><para>
 A NSAS (nameserver address store - part of the resolver) debug message
-reporting the round-trip time (RTT) for a query made to the specified
-nameserver.  The RTT has been updated using the value given and the new RTT is
-displayed.  (The RTT is subject to a calculation that damps out sudden
-changes.  As a result, the new RTT is not necessarily equal to the RTT
-reported.)
+reporting the update of a round-trip time (RTT) for a query made to the
+specified nameserver.  The RTT has been updated using the value given
+and the new RTT is displayed.  (The RTT is subject to a calculation that
+damps out sudden changes.  As a result, the new RTT used by the NSAS in
+future decisions of which nameserver to use is not necessarily equal to
+the RTT reported.)
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NSAS_WRONG_ANSWER">
+<term>NSAS_WRONG_ANSWER queried for %1 RR of type/class %2/%3, received response %4/%5</term>
+<listitem><para>
+A NSAS (nameserver address store - part of the resolver) made a query for
+a resource record of a particular type and class, but instead received
+an answer with a different type and class.
+</para><para>
+This message indicates an internal error in the NSAS.  Please raise a
+bug report.
 </para></listitem>
 </varlistentry>
 
@@ -1460,16 +3588,16 @@ type> tuple in the cache; instead, the deepest delegation found is indicated.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_FOLLOWCNAME">
-<term>RESLIB_FOLLOWCNAME following CNAME chain to <%1></term>
+<varlistentry id="RESLIB_FOLLOW_CNAME">
+<term>RESLIB_FOLLOW_CNAME following CNAME chain to <%1></term>
 <listitem><para>
 A debug message, a CNAME response was received and another query is being issued
 for the <name, class, type> tuple.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_LONGCHAIN">
-<term>RESLIB_LONGCHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
+<varlistentry id="RESLIB_LONG_CHAIN">
+<term>RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded</term>
 <listitem><para>
 A debug message recording that a CNAME response has been received to an upstream
 query for the specified question (Previous debug messages will have indicated
@@ -1479,26 +3607,26 @@ is where on CNAME points to another) and so an error is being returned.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_NONSRRSET">
-<term>RESLIB_NONSRRSET no NS RRSet in referral response received to query for <%1></term>
+<varlistentry id="RESLIB_NO_NS_RRSET">
+<term>RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1></term>
 <listitem><para>
 A debug message, this indicates that a response was received for the specified
-query and was categorised as a referral.  However, the received message did
+query and was categorized as a referral.  However, the received message did
 not contain any NS RRsets.  This may indicate a programming error in the
 response classification code.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_NSASLOOK">
-<term>RESLIB_NSASLOOK looking up nameserver for zone %1 in the NSAS</term>
+<varlistentry id="RESLIB_NSAS_LOOKUP">
+<term>RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS</term>
 <listitem><para>
 A debug message, the RunningQuery object is querying the NSAS for the
 nameservers for the specified zone.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_NXDOMRR">
-<term>RESLIB_NXDOMRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
+<varlistentry id="RESLIB_NXDOM_NXRR">
+<term>RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1></term>
 <listitem><para>
 A debug message recording that either a NXDOMAIN or an NXRRSET response has
 been received to an upstream query for the specified question.  Previous debug
@@ -1514,8 +3642,8 @@ are no retries left, an error will be reported.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_PROTOCOLRTRY">
-<term>RESLIB_PROTOCOLRTRY protocol error in answer for %1: %2 (retries left: %3)</term>
+<varlistentry id="RESLIB_PROTOCOL_RETRY">
+<term>RESLIB_PROTOCOL_RETRY protocol error in answer for %1: %2 (retries left: %3)</term>
 <listitem><para>
 A debug message indicating that a protocol error was received and that
 the resolver is repeating the query to the same nameserver.  After this
@@ -1523,33 +3651,16 @@ repeated query, there will be the indicated number of retries left.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RCODERR">
-<term>RESLIB_RCODERR RCODE indicates error in response to query for <%1></term>
+<varlistentry id="RESLIB_RCODE_ERR">
+<term>RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1></term>
 <listitem><para>
 A debug message, the response to the specified query indicated an error
 that is not covered by a specific code path.  A SERVFAIL will be returned.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_REFERRAL">
-<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
-<listitem><para>
-A debug message recording that a referral response has been received to an
-upstream query for the specified question.  Previous debug messages will
-have indicated the server to which the question was sent.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_REFERZONE">
-<term>RESLIB_REFERZONE referred to zone %1</term>
-<listitem><para>
-A debug message indicating that the last referral message was to the specified
-zone.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="RESLIB_RESCAFND">
-<term>RESLIB_RESCAFND found <%1> in the cache (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_FIND">
+<term>RESLIB_RECQ_CACHE_FIND found <%1> in the cache (resolve() instance %2)</term>
 <listitem><para>
 This is a debug message and indicates that a RecursiveQuery object found the
 the specified <name, class, type> tuple in the cache.  The instance number
@@ -1558,8 +3669,8 @@ been called.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RESCANOTFND">
-<term>RESLIB_RESCANOTFND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RECQ_CACHE_NO_FIND">
+<term>RESLIB_RECQ_CACHE_NO_FIND did not find <%1> in the cache, starting RunningQuery (resolve() instance %2)</term>
 <listitem><para>
 This is a debug message and indicates that the look in the cache made by the
 RecursiveQuery::resolve() method did not find an answer, so a new RunningQuery
@@ -1569,6 +3680,23 @@ been called.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="RESLIB_REFERRAL">
+<term>RESLIB_REFERRAL referral received in response to query for <%1></term>
+<listitem><para>
+A debug message recording that a referral response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESLIB_REFER_ZONE">
+<term>RESLIB_REFER_ZONE referred to zone %1</term>
+<listitem><para>
+A debug message indicating that the last referral message was to the specified
+zone.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="RESLIB_RESOLVE">
 <term>RESLIB_RESOLVE asked to resolve <%1> (resolve() instance %2)</term>
 <listitem><para>
@@ -1579,8 +3707,8 @@ message indicates which of the two resolve() methods has been called.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RRSETFND">
-<term>RESLIB_RRSETFND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
+<varlistentry id="RESLIB_RRSET_FOUND">
+<term>RESLIB_RRSET_FOUND found single RRset in the cache when querying for <%1> (resolve() instance %2)</term>
 <listitem><para>
 A debug message, indicating that when RecursiveQuery::resolve queried the
 cache, a single RRset was found which was put in the answer.  The instance
@@ -1596,16 +3724,16 @@ A debug message giving the round-trip time of the last query and response.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RUNCAFND">
-<term>RESLIB_RUNCAFND found <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_FIND">
+<term>RESLIB_RUNQ_CACHE_FIND found <%1> in the cache</term>
 <listitem><para>
 This is a debug message and indicates that a RunningQuery object found
 the specified <name, class, type> tuple in the cache.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RUNCALOOK">
-<term>RESLIB_RUNCALOOK looking up up <%1> in the cache</term>
+<varlistentry id="RESLIB_RUNQ_CACHE_LOOKUP">
+<term>RESLIB_RUNQ_CACHE_LOOKUP looking up up <%1> in the cache</term>
 <listitem><para>
 This is a debug message and indicates that a RunningQuery object has made
 a call to its doLookup() method to look up the specified <name, class, type>
@@ -1613,16 +3741,16 @@ tuple, the first action of which will be to examine the cache.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RUNQUFAIL">
-<term>RESLIB_RUNQUFAIL failure callback - nameservers are unreachable</term>
+<varlistentry id="RESLIB_RUNQ_FAIL">
+<term>RESLIB_RUNQ_FAIL failure callback - nameservers are unreachable</term>
 <listitem><para>
 A debug message indicating that a RunningQuery's failure callback has been
 called because all nameservers for the zone in question are unreachable.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_RUNQUSUCC">
-<term>RESLIB_RUNQUSUCC success callback - sending query to %1</term>
+<varlistentry id="RESLIB_RUNQ_SUCCESS">
+<term>RESLIB_RUNQ_SUCCESS success callback - sending query to %1</term>
 <listitem><para>
 A debug message indicating that a RunningQuery's success callback has been
 called because a nameserver has been found, and that a query is being sent
@@ -1630,19 +3758,19 @@ to the specified nameserver.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_TESTSERV">
-<term>RESLIB_TESTSERV setting test server to %1(%2)</term>
+<varlistentry id="RESLIB_TEST_SERVER">
+<term>RESLIB_TEST_SERVER setting test server to %1(%2)</term>
 <listitem><para>
-This is an internal debugging message and is only generated in unit tests.
-It indicates that all upstream queries from the resolver are being routed to
-the specified server, regardless of the address of the nameserver to which
-the query would normally be routed.  As it should never be seen in normal
-operation, it is a warning message instead of a debug message.
+This is a warning message only generated in unit tests.  It indicates
+that all upstream queries from the resolver are being routed to the
+specified server, regardless of the address of the nameserver to which
+the query would normally be routed.  If seen during normal operation,
+please submit a bug report.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_TESTUPSTR">
-<term>RESLIB_TESTUPSTR sending upstream query for <%1> to test server at %2</term>
+<varlistentry id="RESLIB_TEST_UPSTREAM">
+<term>RESLIB_TEST_UPSTREAM sending upstream query for <%1> to test server at %2</term>
 <listitem><para>
 This is a debug message and should only be seen in unit tests.  A query for
 the specified <name, class, type> tuple is being sent to a test nameserver
@@ -1653,13 +3781,13 @@ whose address is given in the message.
 <varlistentry id="RESLIB_TIMEOUT">
 <term>RESLIB_TIMEOUT query <%1> to %2 timed out</term>
 <listitem><para>
-A debug message indicating that the specified query has timed out and as
-there are no retries left, an error will be reported.
+A debug message indicating that the specified upstream query has timed out and
+there are no retries left.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESLIB_TIMEOUTRTRY">
-<term>RESLIB_TIMEOUTRTRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
+<varlistentry id="RESLIB_TIMEOUT_RETRY">
+<term>RESLIB_TIMEOUT_RETRY query <%1> to %2 timed out, re-trying (retries left: %3)</term>
 <listitem><para>
 A debug message indicating that the specified query has timed out and that
 the resolver is repeating the query to the same nameserver.  After this
@@ -1685,308 +3813,374 @@ tuple is being sent to a nameserver whose address is given in the message.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_AXFRTCP">
-<term>RESOLVER_AXFRTCP AXFR request received over TCP</term>
+<varlistentry id="RESOLVER_AXFR_TCP">
+<term>RESOLVER_AXFR_TCP AXFR request received over TCP</term>
 <listitem><para>
-A debug message, the resolver received a NOTIFY message over TCP.  The server
-cannot process it and will return an error message to the sender with the
-RCODE set to NOTIMP.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over TCP.  Only authoritative servers
+are able to handle AXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_AXFRUDP">
-<term>RESOLVER_AXFRUDP AXFR request received over UDP</term>
+<varlistentry id="RESOLVER_AXFR_UDP">
+<term>RESOLVER_AXFR_UDP AXFR request received over UDP</term>
 <listitem><para>
-A debug message, the resolver received a NOTIFY message over UDP.  The server
-cannot process it (and in any case, an AXFR request should be sent over TCP)
-and will return an error message to the sender with the RCODE set to FORMERR.
+This is a debug message output when the resolver received a request for
+an AXFR (full transfer of a zone) over UDP.  Only authoritative servers
+are able to handle AXFR requests (and in any case, an AXFR request should
+be sent over TCP), so the resolver will return an error message to the
+sender with the RCODE set to NOTIMP.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_CLTMOSMALL">
-<term>RESOLVER_CLTMOSMALL client timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_CLIENT_TIME_SMALL">
+<term>RESOLVER_CLIENT_TIME_SMALL client timeout of %1 is too small</term>
 <listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the client timeout was found to be too small.  The configuration
+update was abandoned and the parameters were not changed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_CONFIGCHAN">
-<term>RESOLVER_CONFIGCHAN configuration channel created</term>
+<varlistentry id="RESOLVER_CONFIG_CHANNEL">
+<term>RESOLVER_CONFIG_CHANNEL configuration channel created</term>
 <listitem><para>
-A debug message, output when the resolver has successfully established a
-connection to the configuration channel.
+This is a debug message output when the resolver has successfully
+established a connection to the configuration channel.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_CONFIGERR">
-<term>RESOLVER_CONFIGERR error in configuration: %1</term>
+<varlistentry id="RESOLVER_CONFIG_ERROR">
+<term>RESOLVER_CONFIG_ERROR error in configuration: %1</term>
 <listitem><para>
-An error was detected in a configuration update received by the resolver. This
-may be in the format of the configuration message (in which case this is a
-programming error) or it may be in the data supplied (in which case it is
-a user error).  The reason for the error, given as a parameter in the message,
-will give more details.
+An error was detected in a configuration update received by the
+resolver. This may be in the format of the configuration message (in
+which case this is a programming error) or it may be in the data supplied
+(in which case it is a user error).  The reason for the error, included
+in the message, will give more details.  The configuration update is
+not applied and the resolver parameters were not changed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_CONFIGLOAD">
-<term>RESOLVER_CONFIGLOAD configuration loaded</term>
+<varlistentry id="RESOLVER_CONFIG_LOADED">
+<term>RESOLVER_CONFIG_LOADED configuration loaded</term>
 <listitem><para>
-A debug message, output when the resolver configuration has been successfully
-loaded.
+This is a debug message output when the resolver configuration has been
+successfully loaded.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_CONFIGUPD">
-<term>RESOLVER_CONFIGUPD configuration updated: %1</term>
+<varlistentry id="RESOLVER_CONFIG_UPDATED">
+<term>RESOLVER_CONFIG_UPDATED configuration updated: %1</term>
 <listitem><para>
-A debug message, the configuration has been updated with the specified
-information.
+This is a debug message output when the resolver configuration is being
+updated with the specified information.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="RESOLVER_CREATED">
 <term>RESOLVER_CREATED main resolver object created</term>
 <listitem><para>
-A debug message, output when the Resolver() object has been created.
+This is a debug message indicating that the main resolver object has
+been created.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_DNSMSGRCVD">
-<term>RESOLVER_DNSMSGRCVD DNS message received: %1</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_RECEIVED">
+<term>RESOLVER_DNS_MESSAGE_RECEIVED DNS message received: %1</term>
 <listitem><para>
-A debug message, this always precedes some other logging message and is the
-formatted contents of the DNS packet that the other message refers to.
+This is a debug message from the resolver listing the contents of a
+received DNS message.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_DNSMSGSENT">
-<term>RESOLVER_DNSMSGSENT DNS message of %1 bytes sent: %2</term>
+<varlistentry id="RESOLVER_DNS_MESSAGE_SENT">
+<term>RESOLVER_DNS_MESSAGE_SENT DNS message of %1 bytes sent: %2</term>
 <listitem><para>
-A debug message, this contains details of the response sent back to the querying
-system.
+This is a debug message containing details of the response returned by
+the resolver to the querying system.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="RESOLVER_FAILED">
 <term>RESOLVER_FAILED resolver failed, reason: %1</term>
 <listitem><para>
-This is an error message output when an unhandled exception is caught by the
-resolver.  All it can do is to shut down.
+This is an error message output when an unhandled exception is caught
+by the resolver.  After this, the resolver will shut itself down.
+Please submit a bug report.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_FWDADDR">
-<term>RESOLVER_FWDADDR setting forward address %1(%2)</term>
+<varlistentry id="RESOLVER_FORWARD_ADDRESS">
+<term>RESOLVER_FORWARD_ADDRESS setting forward address %1(%2)</term>
 <listitem><para>
-This message may appear multiple times during startup, and it lists the
-forward addresses used by the resolver when running in forwarding mode.
+If the resolver is running in forward mode, this message will appear
+during startup to list the forward address.  If multiple addresses are
+specified, it will appear once for each address.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_FWDQUERY">
-<term>RESOLVER_FWDQUERY processing forward query</term>
+<varlistentry id="RESOLVER_FORWARD_QUERY">
+<term>RESOLVER_FORWARD_QUERY processing forward query</term>
 <listitem><para>
-The received query has passed all checks and is being forwarded to upstream
+This is a debug message indicating that a query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
 servers.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_HDRERR">
-<term>RESOLVER_HDRERR message received, exception when processing header: %1</term>
+<varlistentry id="RESOLVER_HEADER_ERROR">
+<term>RESOLVER_HEADER_ERROR message received, exception when processing header: %1</term>
 <listitem><para>
-A debug message noting that an exception occurred during the processing of
-a received packet.  The packet has been dropped.
+This is a debug message from the resolver noting that an exception
+occurred during the processing of a received packet.  The packet has
+been dropped.
 </para></listitem>
 </varlistentry>
 
 <varlistentry id="RESOLVER_IXFR">
 <term>RESOLVER_IXFR IXFR request received</term>
 <listitem><para>
-The resolver received a NOTIFY message over TCP.  The server cannot process it
-and will return an error message to the sender with the RCODE set to NOTIMP.
+This is a debug message indicating that the resolver received a request
+for an IXFR (incremental transfer of a zone).  Only authoritative servers
+are able to handle IXFR requests, so the resolver will return an error
+message to the sender with the RCODE set to NOTIMP.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_LKTMOSMALL">
-<term>RESOLVER_LKTMOSMALL lookup timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_LOOKUP_TIME_SMALL">
+<term>RESOLVER_LOOKUP_TIME_SMALL lookup timeout of %1 is too small</term>
 <listitem><para>
-An error indicating that the configuration value specified for the lookup
-timeout is too small.
+During the update of the resolver's configuration parameters, the value
+of the lookup timeout was found to be too small.  The configuration
+update will not be applied.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_NFYNOTAUTH">
-<term>RESOLVER_NFYNOTAUTH NOTIFY arrived but server is not authoritative</term>
+<varlistentry id="RESOLVER_MESSAGE_ERROR">
+<term>RESOLVER_MESSAGE_ERROR error parsing received message: %1 - returning %2</term>
 <listitem><para>
-The resolver received a NOTIFY message.  As the server is not authoritative it
-cannot process it, so it returns an error message to the sender with the RCODE
-set to NOTAUTH.
+This is a debug message noting that parsing of the body of a received
+message by the resolver failed due to some error (although the parsing of
+the header succeeded).  The message parameters give a textual description
+of the problem and the RCODE returned.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_NORMQUERY">
-<term>RESOLVER_NORMQUERY processing normal query</term>
+<varlistentry id="RESOLVER_NEGATIVE_RETRIES">
+<term>RESOLVER_NEGATIVE_RETRIES negative number of retries (%1) specified in the configuration</term>
 <listitem><para>
-The received query has passed all checks and is being processed by the resolver.
+This error is issued when a resolver configuration update has specified
+a negative retry count: only zero or positive values are valid.  The
+configuration update was abandoned and the parameters were not changed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_NOROOTADDR">
-<term>RESOLVER_NOROOTADDR no root addresses available</term>
+<varlistentry id="RESOLVER_NON_IN_PACKET">
+<term>RESOLVER_NON_IN_PACKET non-IN class request received, returning REFUSED message</term>
 <listitem><para>
-A warning message during startup, indicates that no root addresses have been
-set.  This may be because the resolver will get them from a priming query.
+This debug message is issued when the resolver has received a DNS packet that
+was not IN (Internet) class.  The resolver cannot handle such packets,
+so is returning a REFUSED response to the sender.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_NOTIN">
-<term>RESOLVER_NOTIN non-IN class request received, returning REFUSED message</term>
+<varlistentry id="RESOLVER_NORMAL_QUERY">
+<term>RESOLVER_NORMAL_QUERY processing normal query</term>
 <listitem><para>
-A debug message, the resolver has received a DNS packet that was not IN class.
-The resolver cannot handle such packets, so is returning a REFUSED response to
-the sender.
+This is a debug message indicating that the query received by the resolver
+has passed a set of checks (message is well-formed, it is allowed by the
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_NOTONEQUES">
-<term>RESOLVER_NOTONEQUES query contained %1 questions, exactly one question was expected</term>
+<varlistentry id="RESOLVER_NOTIFY_RECEIVED">
+<term>RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative</term>
 <listitem><para>
-A debug message, the resolver received a query that contained the number of
-entires in the question section detailed in the message.  This is a malformed
-message, as a DNS query must contain only one question.  The resolver will
-return a message to the sender with the RCODE set to FORMERR.
+The resolver has received a NOTIFY message.  As the server is not
+authoritative it cannot process it, so it returns an error message to
+the sender with the RCODE set to NOTAUTH.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_OPCODEUNS">
-<term>RESOLVER_OPCODEUNS opcode %1 not supported by the resolver</term>
+<varlistentry id="RESOLVER_NOT_ONE_QUESTION">
+<term>RESOLVER_NOT_ONE_QUESTION query contained %1 questions, exactly one question was expected</term>
 <listitem><para>
-A debug message, the resolver received a message with an unsupported opcode
-(it can only process QUERY opcodes).  It will return a message to the sender
-with the RCODE set to NOTIMP.
+This debug message indicates that the resolver received a query that
+contained the number of entries in the question section detailed in
+the message.  This is a malformed message, as a DNS query must contain
+only one question.  The resolver will return a message to the sender
+with the RCODE set to FORMERR.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_PARSEERR">
-<term>RESOLVER_PARSEERR error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_NO_ROOT_ADDRESS">
+<term>RESOLVER_NO_ROOT_ADDRESS no root addresses available</term>
 <listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some non-protocol related reason
-(although the parsing of the header succeeded).  The message parameters give
-a textual description of the problem and the RCODE returned.
+A warning message issued during resolver startup, this indicates that
+no root addresses have been set.  This may be because the resolver will
+get them from a priming query.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_PRINTMSG">
-<term>RESOLVER_PRINTMSG print message command, aeguments are: %1</term>
+<varlistentry id="RESOLVER_PARSE_ERROR">
+<term>RESOLVER_PARSE_ERROR error parsing received message: %1 - returning %2</term>
 <listitem><para>
-This message is logged when a "print_message" command is received over the
-command channel.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some non-protocol
+related reason (although the parsing of the header succeeded).
+The message parameters give a textual description of the problem and
+the RCODE returned.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_PROTERR">
-<term>RESOLVER_PROTERR protocol error parsing received message: %1 - returning %2</term>
+<varlistentry id="RESOLVER_PRINT_COMMAND">
+<term>RESOLVER_PRINT_COMMAND print message command, arguments are: %1</term>
 <listitem><para>
-A debug message noting that the resolver received a message and the parsing
-of the body of the message failed due to some protocol error (although the
-parsing of the header succeeded).  The message parameters give a textual
-description of the problem and the RCODE returned.
+This debug message is logged when a "print_message" command is received
+by the resolver over the command channel.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_QUSETUP">
-<term>RESOLVER_QUSETUP query setup</term>
+<varlistentry id="RESOLVER_PROTOCOL_ERROR">
+<term>RESOLVER_PROTOCOL_ERROR protocol error parsing received message: %1 - returning %2</term>
 <listitem><para>
-A debug message noting that the resolver is creating a RecursiveQuery object.
+This is a debug message noting that the resolver received a message and
+the parsing of the body of the message failed due to some protocol error
+(although the parsing of the header succeeded).  The message parameters
+give a textual description of the problem and the RCODE returned.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_QUSHUT">
-<term>RESOLVER_QUSHUT query shutdown</term>
+<varlistentry id="RESOLVER_QUERY_ACCEPTED">
+<term>RESOLVER_QUERY_ACCEPTED query accepted: '%1/%2/%3' from %4</term>
 <listitem><para>
-A debug message noting that the resolver is destroying a RecursiveQuery object.
+This debug message is produced by the resolver when an incoming query
+is accepted in terms of the query ACL.  The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_QUTMOSMALL">
-<term>RESOLVER_QUTMOSMALL query timeout of %1 is too small</term>
+<varlistentry id="RESOLVER_QUERY_DROPPED">
+<term>RESOLVER_QUERY_DROPPED query dropped: '%1/%2/%3' from %4</term>
 <listitem><para>
-An error indicating that the configuration value specified for the query
-timeout is too small.
+This is an informational message that indicates an incoming query has
+been dropped by the resolver because of the query ACL.  Unlike the
+RESOLVER_QUERY_REJECTED case, the server does not return any response.
+The log message shows the query in the form of <query name>/<query
+type>/<query class>, and the client that sends the query in the form of
+<Source IP address>#<source port>.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_RECURSIVE">
-<term>RESOLVER_RECURSIVE running in recursive mode</term>
+<varlistentry id="RESOLVER_QUERY_REJECTED">
+<term>RESOLVER_QUERY_REJECTED query rejected: '%1/%2/%3' from %4</term>
 <listitem><para>
-This is an informational message that appears at startup noting that the
-resolver is running in recursive mode.
+This is an informational message that indicates an incoming query has
+been rejected by the resolver because of the query ACL.  This results
+in a response with an RCODE of REFUSED. The log message shows the query
+in the form of <query name>/<query type>/<query class>, and the client
+that sends the query in the form of <Source IP address>#<source port>.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_RECVMSG">
-<term>RESOLVER_RECVMSG resolver has received a DNS message</term>
+<varlistentry id="RESOLVER_QUERY_SETUP">
+<term>RESOLVER_QUERY_SETUP query setup</term>
 <listitem><para>
-A debug message indicating that the resolver has received a message.  Depending
-on the debug settings, subsequent log output will indicate the nature of the
-message.
+This is a debug message noting that the resolver is creating a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_SHUTDOWN">
+<term>RESOLVER_QUERY_SHUTDOWN query shutdown</term>
+<listitem><para>
+This is a debug message noting that the resolver is destroying a
+RecursiveQuery object.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_QUERY_TIME_SMALL">
+<term>RESOLVER_QUERY_TIME_SMALL query timeout of %1 is too small</term>
+<listitem><para>
+During the update of the resolver's configuration parameters, the value
+of the query timeout was found to be too small.  The configuration
+parameters were not changed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_RETRYNEG">
-<term>RESOLVER_RETRYNEG negative number of retries (%1) specified in the configuration</term>
+<varlistentry id="RESOLVER_RECEIVED_MESSAGE">
+<term>RESOLVER_RECEIVED_MESSAGE resolver has received a DNS message</term>
 <listitem><para>
-An error message indicating that the resolver configuration has specified a
-negative retry count.  Only zero or positive values are valid.
+This is a debug message indicating that the resolver has received a
+DNS message.  Depending on the debug settings, subsequent log output
+will indicate the nature of the message.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_ROOTADDR">
-<term>RESOLVER_ROOTADDR setting root address %1(%2)</term>
+<varlistentry id="RESOLVER_RECURSIVE">
+<term>RESOLVER_RECURSIVE running in recursive mode</term>
 <listitem><para>
-This message may appear multiple times during startup; it lists the root
-addresses used by the resolver.
+This is an informational message that appears at startup noting that
+the resolver is running in recursive mode.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_SERVICE">
-<term>RESOLVER_SERVICE service object created</term>
+<varlistentry id="RESOLVER_SERVICE_CREATED">
+<term>RESOLVER_SERVICE_CREATED service object created</term>
 <listitem><para>
-A debug message, output when the main service object (which handles the
-received queries) is created.
+This debug message is output when the resolver creates the main service object
+(which handles the received queries).
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_SETPARAM">
-<term>RESOLVER_SETPARAM query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
+<varlistentry id="RESOLVER_SET_PARAMS">
+<term>RESOLVER_SET_PARAMS query timeout: %1, client timeout: %2, lookup timeout: %3, retry count: %4</term>
 <listitem><para>
-A debug message, lists the parameters associated with the message.  These are:
+This debug message lists the parameters being set for the resolver.  These are:
 query timeout: the timeout (in ms) used for queries originated by the resolver
-to upstream servers.  Client timeout: the interval to resolver a query by
+to upstream servers.  Client timeout: the interval to resolve a query by
 a client: after this time, the resolver sends back a SERVFAIL to the client
-whilst continuing to resolver the query. Lookup timeout: the time at which the
+whilst continuing to resolve the query. Lookup timeout: the time at which the
 resolver gives up trying to resolve a query.  Retry count: the number of times
 the resolver will retry a query to an upstream server if it gets a timeout.
 </para><para>
 The client and lookup timeouts require a bit more explanation. The
-resolution of the clent query might require a large number of queries to
+resolution of the client query might require a large number of queries to
 upstream nameservers.  Even if none of these queries timeout, the total time
 taken to perform all the queries may exceed the client timeout.  When this
 happens, a SERVFAIL is returned to the client, but the resolver continues
-with the resolution process. Data received is added to the cache.  However,
-there comes a time - the lookup timeout - when even the resolve gives up.
+with the resolution process; data received is added to the cache.  However,
+there comes a time - the lookup timeout - when even the resolver gives up.
 At this point it will wait for pending upstream queries to complete or
 timeout and drop the query.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="RESOLVER_SET_QUERY_ACL">
+<term>RESOLVER_SET_QUERY_ACL query ACL is configured</term>
+<listitem><para>
+This debug message is generated when a new query ACL is configured for
+the resolver.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_SET_ROOT_ADDRESS">
+<term>RESOLVER_SET_ROOT_ADDRESS setting root address %1(%2)</term>
+<listitem><para>
+This message gives the address of one of the root servers used by the
+resolver.  It is output during startup and may appear multiple times,
+once for each root server address.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="RESOLVER_SHUTDOWN">
 <term>RESOLVER_SHUTDOWN resolver shutdown complete</term>
 <listitem><para>
-This information message is output when the resolver has shut down.
+This informational message is output when the resolver has shut down.
 </para></listitem>
 </varlistentry>
 
@@ -2005,11 +4199,1192 @@ An informational message, this is output when the resolver starts up.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="RESOLVER_UNEXRESP">
-<term>RESOLVER_UNEXRESP received unexpected response, ignoring</term>
+<varlistentry id="RESOLVER_UNEXPECTED_RESPONSE">
+<term>RESOLVER_UNEXPECTED_RESPONSE received unexpected response, ignoring</term>
+<listitem><para>
+This is a debug message noting that the resolver received a DNS response
+packet on the port on which is it listening for queries.  The packet
+has been ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="RESOLVER_UNSUPPORTED_OPCODE">
+<term>RESOLVER_UNSUPPORTED_OPCODE opcode %1 not supported by the resolver</term>
+<listitem><para>
+This is debug message output when the resolver received a message with an
+unsupported opcode (it can only process QUERY opcodes).  It will return
+a message to the sender with the RCODE set to NOTIMP.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESSES_NOT_LIST">
+<term>SRVCOMM_ADDRESSES_NOT_LIST the address and port specification is not a list in %1</term>
+<listitem><para>
+This points to an error in configuration. What was supposed to be a list of
+IP address - port pairs isn't a list at all but something else.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_FAIL">
+<term>SRVCOMM_ADDRESS_FAIL failed to listen on addresses (%1)</term>
+<listitem><para>
+The server failed to bind to one of the address/port pairs it should listen
+on according to the configuration, for the reason listed in the message
+(usually because that pair is already used by another service or because of
+missing privileges). The server will try
+to recover and bind the address/port pairs it was listening to before (if any).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_MISSING">
+<term>SRVCOMM_ADDRESS_MISSING address specification is missing "address" or "port" element in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is missing either an address or port and so cannot be used. The
+specification causing the error is given in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_TYPE">
+<term>SRVCOMM_ADDRESS_TYPE address specification type is invalid in %1</term>
+<listitem><para>
+This points to an error in configuration. An address specification in the
+configuration is malformed. The specification causing the error is given in the
+message. A valid specification contains an address part (which must be a string
+and must represent a valid IPv4 or IPv6 address) and port (which must be an
+integer in the range valid for TCP/UDP ports on your system).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_UNRECOVERABLE">
+<term>SRVCOMM_ADDRESS_UNRECOVERABLE failed to recover original addresses also (%2)</term>
+<listitem><para>
+The recovery of old addresses after SRVCOMM_ADDRESS_FAIL also failed for
+the reason listed.
+</para><para>
+The condition indicates problems with the server and/or the system on
+which it is running.  The server will continue running to allow
+reconfiguration, but will not be listening on any address or port until
+an administrator configures it to do so.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_ADDRESS_VALUE">
+<term>SRVCOMM_ADDRESS_VALUE address to set: %1#%2</term>
+<listitem><para>
+Debug message. This lists one address and port value of the set of
+addresses we are going to listen on (eg. there will be one log message
+per pair). This appears only after SRVCOMM_SET_LISTEN, but might
+be hidden, as it has higher debug level.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_DEINIT">
+<term>SRVCOMM_KEYS_DEINIT deinitializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is deinitializing the TSIG keyring.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_INIT">
+<term>SRVCOMM_KEYS_INIT initializing TSIG keyring</term>
+<listitem><para>
+Debug message indicating that the server is initializing the global TSIG
+keyring. This should be seen only at server start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_KEYS_UPDATE">
+<term>SRVCOMM_KEYS_UPDATE updating TSIG keyring</term>
+<listitem><para>
+Debug message indicating new keyring is being loaded from configuration (either
+on startup or as a result of configuration update).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_PORT_RANGE">
+<term>SRVCOMM_PORT_RANGE port out of valid range (%1 in %2)</term>
+<listitem><para>
+This points to an error in configuration. The port in an address
+specification is outside the valid range of 0 to 65535.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="SRVCOMM_SET_LISTEN">
+<term>SRVCOMM_SET_LISTEN setting addresses to listen to</term>
+<listitem><para>
+Debug message, noting that the server is about to start listening on a
+different set of IP addresses and ports than before.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_BAD_OPTION_VALUE">
+<term>STATHTTPD_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats-httpd module was called with a bad command-line argument
+and will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CC_SESSION_ERROR">
+<term>STATHTTPD_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats-httpd module was unable to connect to the BIND 10 command
+and control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats-httpd module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING">
+<term>STATHTTPD_CLOSING closing %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will stop listening for requests on the given
+address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_CLOSING_CC_SESSION">
+<term>STATHTTPD_CLOSING_CC_SESSION stopping cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is disconnecting
+from the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_HANDLE_CONFIG">
+<term>STATHTTPD_HANDLE_CONFIG reading configuration: %1</term>
+<listitem><para>
+The stats-httpd daemon has received new configuration data and will now
+process it. The (changed) data is printed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats-httpd module, and it will
+now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_STATUS_COMMAND">
+<term>STATHTTPD_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats-httpd module, and it will
+respond with 'Stats Httpd is up.' and its PID.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_RECEIVED_UNKNOWN_COMMAND">
+<term>STATHTTPD_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats-httpd module. The
+stats-httpd module will respond with an error, and the command will
+be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_DATAERROR">
+<term>STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_ERROR">
+<term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 500
+response will be sent back, and the specific error is printed. This
+is an error condition that likely points to a module that is not
+responding correctly to statistic requests.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SERVER_INIT_ERROR">
+<term>STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon receiving its configuration data. The most likely cause
+is a port binding problem or a bad configuration value. The specific
+error is printed in the message. The new configuration is ignored,
+and an error is sent back.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_SHUTDOWN">
+<term>STATHTTPD_SHUTDOWN shutting down</term>
+<listitem><para>
+The stats-httpd daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTED">
+<term>STATHTTPD_STARTED listening on %1#%2</term>
+<listitem><para>
+The stats-httpd daemon will now start listening for requests on the
+given address and port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STARTING_CC_SESSION">
+<term>STATHTTPD_STARTING_CC_SESSION starting cc session</term>
+<listitem><para>
+Debug message indicating that the stats-httpd module is connecting to
+the command and control bus.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_START_SERVER_INIT_ERROR">
+<term>STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1</term>
+<listitem><para>
+There was a problem initializing the HTTP server in the stats-httpd
+module upon startup. The most likely cause is that it was not able
+to bind to the listening port. The specific error is printed, and the
+module will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_STOPPED_BY_KEYBOARD">
+<term>STATHTTPD_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats-httpd
+daemon. The daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATHTTPD_UNKNOWN_CONFIG_ITEM">
+<term>STATHTTPD_UNKNOWN_CONFIG_ITEM unknown configuration item: %1</term>
+<listitem><para>
+The stats-httpd daemon received a configuration update from the
+configuration manager. However, one of the items in the
+configuration is unknown. The new configuration is ignored, and an
+error is sent back. A possible cause is that there was an upgrade
+problem, and the stats-httpd version is out of sync with the rest of
+the system.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_BAD_OPTION_VALUE">
+<term>STATS_BAD_OPTION_VALUE bad command line argument: %1</term>
+<listitem><para>
+The stats module was called with a bad command-line argument and will
+not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_CC_SESSION_ERROR">
+<term>STATS_CC_SESSION_ERROR error connecting to message bus: %1</term>
+<listitem><para>
+The stats module was unable to connect to the BIND 10 command and
+control bus. A likely problem is that the message bus daemon
+(b10-msgq) is not running. The stats module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_NEW_CONFIG">
+<term>STATS_RECEIVED_NEW_CONFIG received new configuration: %1</term>
+<listitem><para>
+This debug message is printed when the stats module has received a
+configuration update from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</term>
+<listitem><para>
+The stats module received a command to show all statistics schemas of all modules.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</term>
+<listitem><para>
+The stats module received a command to show the specified statistics schema of the specified module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_ALL_COMMAND">
+<term>STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</term>
+<listitem><para>
+The stats module received a command to show all statistics that it has
+collected.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHOW_NAME_COMMAND">
+<term>STATS_RECEIVED_SHOW_NAME_COMMAND received command to show statistics for %1</term>
+<listitem><para>
+The stats module received a command to show the statistics that it has
+collected for the given item.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_SHUTDOWN_COMMAND">
+<term>STATS_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+A shutdown command was sent to the stats module and it will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_STATUS_COMMAND">
+<term>STATS_RECEIVED_STATUS_COMMAND received command to return status</term>
+<listitem><para>
+A status command was sent to the stats module. It will return a
+response indicating that it is running normally.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_RECEIVED_UNKNOWN_COMMAND">
+<term>STATS_RECEIVED_UNKNOWN_COMMAND received unknown command: %1</term>
+<listitem><para>
+An unknown command has been sent to the stats module. The stats module
+will respond with an error and the command will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_SEND_REQUEST_BOSS">
+<term>STATS_SEND_REQUEST_BOSS requesting boss to send statistics</term>
+<listitem><para>
+This debug message is printed when a request is sent to the boss module
+to send its data to the stats module.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STARTING">
+<term>STATS_STARTING starting</term>
+<listitem><para>
+The stats module will now start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_START_ERROR">
+<term>STATS_START_ERROR stats module error: %1</term>
+<listitem><para>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_STOPPED_BY_KEYBOARD">
+<term>STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the stats module. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="STATS_UNKNOWN_COMMAND_IN_SPEC">
+<term>STATS_UNKNOWN_COMMAND_IN_SPEC unknown command in specification file: %1</term>
+<listitem><para>
+The specification file for the stats module contains a command that
+is unknown in the implementation. The most likely cause is an
+installation problem, where the specification file stats.spec is
+from a different version of BIND 10 than the stats module itself.
+Please check your installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_DATABASE_FAILURE">
+<term>XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</term>
+<listitem><para>
+The AXFR transfer for the given zone has failed due to a database problem.
+The error is shown in the log message.  Note: due to the code structure
+this can only happen for AXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_AXFR_INCONSISTENT_SOA">
+<term>XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</term>
+<listitem><para>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same.  According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold.  There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems that it would be better to reject the
+transfer if a mismatch is detected.  On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs.  For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found.  If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_ADDR_FORMAT">
+<term>XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</term>
+<listitem><para>
+The given master address is not a valid IP address.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_MASTER_PORT_FORMAT">
+<term>XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</term>
+<listitem><para>
+The master port as read from the configuration is not a valid port number.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_TSIG_KEY_STRING">
+<term>XFRIN_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_BAD_ZONE_CLASS">
+<term>XFRIN_BAD_ZONE_CLASS Invalid zone class: %1</term>
+<listitem><para>
+The zone class as read from the configuration is not a valid DNS class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CC_SESSION_ERROR">
+<term>XFRIN_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_COMMAND_ERROR">
+<term>XFRIN_COMMAND_ERROR error while executing command '%1': %2</term>
+<listitem><para>
+There was an error while the given command was being processed. The
+error is given in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_CONNECT_MASTER">
+<term>XFRIN_CONNECT_MASTER error connecting to master at %1: %2</term>
+<listitem><para>
+There was an error opening a connection to the master. The error is
+shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_GOT_INCREMENTAL_RESP">
+<term>XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</term>
+<listitem><para>
+In an attempt at IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found.  This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_GOT_NONINCREMENTAL_RESP">
+<term>XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</term>
+<listitem><para>
+Non incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA.  Non incremental transfer is
+either AXFR or AXFR-style IXFR.  In the latter case, it means that
+in a response to IXFR query the first data is not SOA or its SOA serial
+is not equal to the requested SOA serial.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_IMPORT_DNS">
+<term>XFRIN_IMPORT_DNS error importing python DNS module: %1</term>
+<listitem><para>
+There was an error importing the python DNS module pydnspp. The most
+likely cause is a PYTHONPATH problem.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR">
+<term>XFRIN_MSGQ_SEND_ERROR error while contacting %1 and %2</term>
+<listitem><para>
+There was a problem sending a message to the xfrout module or the
+zone manager. This most likely means that the msgq daemon has quit or
+was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER">
+<term>XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</term>
+<listitem><para>
+There was a problem sending a message to the zone manager. This most
+likely means that the msgq daemon has quit or was killed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_NOTIFY_UNKNOWN_MASTER">
+<term>XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</term>
+<listitem><para>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_RETRANSFER_UNKNOWN_ZONE">
+<term>XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</term>
+<listitem><para>
+There was an internal command to retransfer the given zone, but the
+zone is not known to the system. This may indicate that the configuration
+for xfrin is incomplete, or there was a typographical error in the
+zone name in the configuration.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STARTING">
+<term>XFRIN_STARTING starting resolver with command line '%1'</term>
+<listitem><para>
+An informational message, this is output when the resolver starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_STOPPED_BY_KEYBOARD">
+<term>XFRIN_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrin daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_UNKNOWN_ERROR">
+<term>XFRIN_UNKNOWN_ERROR unknown error: %1</term>
+<listitem><para>
+An uncaught exception was raised while running the xfrin daemon. The
+exception message is printed in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_OTHER_FAILURE">
+<term>XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module.  Possible reasons are a broken DNS message or failure
+in database connection.  The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_PROCESS_FAILURE">
+<term>XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</term>
+<listitem><para>
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general, these errors are not really expected
+ones, and indicate an installation error or a program bug.  The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete.  So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
+<term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
+<listitem><para>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_FALLBACK">
+<term>XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</term>
+<listitem><para>
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such that the remote server doesn't support IXFR, we don't have the SOA record
+(or the zone at all), we are out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we try that one in case it helps.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
+<term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
+<listitem><para>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFRIN_XFR_TRANSFER_SUCCESS">
+<term>XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</term>
+<listitem><para>
+The XFR transfer of the given zone was successfully completed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
+<term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
+<listitem><para>
+The TSIG key string as read from the configuration does not represent
+a valid TSIG key.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_ERROR">
+<term>XFROUT_CC_SESSION_ERROR error reading from cc channel: %1</term>
+<listitem><para>
+There was a problem reading from the command and control channel. The
+most likely cause is that the msgq daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CC_SESSION_TIMEOUT_ERROR">
+<term>XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response</term>
+<listitem><para>
+There was a problem reading a response from another module over the
+command and control channel. The most likely cause is that the
+configuration manager b10-cfgmgr is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_CONFIG_ERROR">
+<term>XFROUT_CONFIG_ERROR error found in configuration data: %1</term>
+<listitem><para>
+The xfrout process encountered an error when installing the configuration at
+startup time.  Details of the error are included in the log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_FETCH_REQUEST_ERROR">
+<term>XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</term>
+<listitem><para>
+There was a socket error while contacting the b10-auth daemon to
+fetch a transfer request. The auth daemon may have shutdown.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_HANDLE_QUERY_ERROR">
+<term>XFROUT_HANDLE_QUERY_ERROR error while handling query: %1</term>
+<listitem><para>
+There was a general error handling an xfrout query. The error is shown
+in the message. In principle this error should not appear, and points
+to an oversight catching exceptions in the right place. However, to
+ensure the daemon keeps running, this error is caught and reported.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IMPORT">
+<term>XFROUT_IMPORT error importing python module: %1</term>
+<listitem><para>
+There was an error importing a python module. One of the modules needed
+by xfrout could not be found. This suggests that either some libraries
+are missing on the system, or the PYTHONPATH variable is not correct.
+The specific place where this library needs to be depends on your
+system and your specific installation.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_MULTIPLE_SOA">
+<term>XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</term>
+<listitem><para>
+An IXFR request was received with more than one SOA RRs in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_JOURNAL_SUPPORT">
+<term>XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_SOA">
+<term>XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</term>
+<listitem><para>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_VERSION">
+<term>XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received, but the requested range of differences
+were not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_ZONE">
+<term>XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</term>
+<listitem><para>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_UPTODATE">
+<term>XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</term>
+<listitem><para>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic.  This will soon
+be implemented.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
+<term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
+<listitem><para>
+There was a problem in the lower level module handling configuration and
+control commands.  This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization.  A detailed error message from the module
+will also be displayed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG">
+<term>XFROUT_NEW_CONFIG Update xfrout configuration</term>
+<listitem><para>
+New configuration settings have been sent from the configuration
+manager. The xfrout daemon will now apply them.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NEW_CONFIG_DONE">
+<term>XFROUT_NEW_CONFIG_DONE Update xfrout configuration done</term>
+<listitem><para>
+The xfrout daemon is now done reading the new configuration settings
+received from the configuration manager.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_NOTIFY_COMMAND">
+<term>XFROUT_NOTIFY_COMMAND received command to send notifies for %1/%2</term>
+<listitem><para>
+The xfrout daemon received a command on the command channel that
+NOTIFY packets should be sent for the given zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PARSE_QUERY_ERROR">
+<term>XFROUT_PARSE_QUERY_ERROR error parsing query: %1</term>
+<listitem><para>
+There was a parse error while reading an incoming query. The parse
+error is shown in the log message. A remote client sent a packet we
+do not understand or support. The xfrout request will be ignored.
+In general, this should only occur for unexpected problems like
+memory allocation failures, as the query should already have been
+parsed by the b10-auth daemon, before it was passed here.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_PROCESS_REQUEST_ERROR">
+<term>XFROUT_PROCESS_REQUEST_ERROR error processing transfer request: %2</term>
+<listitem><para>
+There was an error processing a transfer request. The error is included
+in the log message, but at this point no specific information other
+than that could be given. This points to incomplete exception handling
+in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_DROPPED">
+<term>XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer zone to
+given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_QUOTA_EXCCEEDED">
+<term>XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</term>
+<listitem><para>
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately responded and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_REJECTED">
+<term>XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</term>
+<listitem><para>
+The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
+given host. This is because of ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVED_SHUTDOWN_COMMAND">
+<term>XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</term>
+<listitem><para>
+The xfrout daemon received a shutdown command from the command channel
+and will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR">
+<term>XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection</term>
+<listitem><para>
+There was an error receiving the file descriptor for the transfer
+request. Normally, the request is received by b10-auth, and passed on
+to the xfrout daemon, so it can answer directly. However, there was a
+problem receiving this file descriptor. The request will be ignored.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_OLD_UNIX_SOCKET_FILE_ERROR error removing unix socket file %1: %2</term>
+<listitem><para>
+The unix socket file xfrout needs for contact with the auth daemon
+already exists, and needs to be removed first, but there is a problem
+removing it. It is likely that we do not have permission to remove
+this file. The specific error is shown in the log message. The xfrout
+daemon will shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR">
+<term>XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR error clearing unix socket file %1: %2</term>
+<listitem><para>
+When shutting down, the xfrout daemon tried to clear the unix socket
+file used for communication with the auth daemon. It failed to remove
+the file. The reason for the failure is given in the error message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_SOCKET_SELECT_ERROR">
+<term>XFROUT_SOCKET_SELECT_ERROR error while calling select() on request socket: %1</term>
+<listitem><para>
+There was an error while calling select() on the socket that informs
+the xfrout daemon that a new xfrout request has arrived. This should
+be a result of rare local error such as memory allocation failure and
+shouldn't happen under normal conditions. The error is included in the
+log message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPED_BY_KEYBOARD">
+<term>XFROUT_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
+<listitem><para>
+There was a keyboard interrupt signal to stop the xfrout daemon. The
+daemon will now shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_STOPPING">
+<term>XFROUT_STOPPING the xfrout daemon is shutting down</term>
+<listitem><para>
+The current transfer is aborted, as the xfrout daemon is shutting down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_UNIX_SOCKET_FILE_IN_USE">
+<term>XFROUT_UNIX_SOCKET_FILE_IN_USE another xfrout process seems to be using the unix socket file %1</term>
+<listitem><para>
+While starting up, the xfrout daemon tried to clear the unix domain
+socket needed for contacting the b10-auth daemon to pass requests
+on, but the file is in use. The most likely cause is that another
+xfrout daemon process is still running. This xfrout daemon (the one
+printing this message) will not start.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_CHECK_ERROR">
+<term>XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</term>
+<listitem><para>
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low level error in the data
+source, but it may also be other general (more unlikely) errors such
+as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_DONE">
+<term>XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_ERROR">
+<term>XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_FAILED">
+<term>XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</term>
+<listitem><para>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_STARTED">
+<term>XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_CCSESSION_ERROR">
+<term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
+<listitem><para>
+An error was encountered on the command channel.  The message indicates
+the nature of the error.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_JITTER_TOO_BIG">
+<term>ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5</term>
+<listitem><para>
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_KEYBOARD_INTERRUPT">
+<term>ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt</term>
+<listitem><para>
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_LOAD_ZONE">
+<term>ZONEMGR_LOAD_ZONE loading zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_MASTER_ADDRESS">
+<term>ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master</term>
+<listitem><para>
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received.  This may be due to an internal programming error; please
+submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_SOA">
+<term>ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record</term>
+<listitem><para>
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record.  The load has
+been abandoned.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_TIMER_THREAD">
+<term>ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running</term>
+<listitem><para>
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running.  This may indicate an
+internal program error.  Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_CLASS">
+<term>ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_NO_ZONE_NAME">
+<term>ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone</term>
+<listitem><para>
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_NOTIFY">
+<term>ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel.  The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_SHUTDOWN">
+<term>ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_UNKNOWN">
+<term>ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'</term>
+<listitem><para>
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel.  The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error.  Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_FAILED">
+<term>ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel.  The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_RECEIVE_XFRIN_SUCCESS">
+<term>ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)</term>
+<listitem><para>
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel.  The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_REFRESH_ZONE">
+<term>ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)</term>
+<listitem><para>
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SELECT_ERROR">
+<term>ZONEMGR_SELECT_ERROR error with select(): %1</term>
+<listitem><para>
+An attempt to wait for input from a socket failed.  The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SEND_FAIL">
+<term>ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed</term>
+<listitem><para>
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed.  The session between the modules has been closed.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_ERROR">
+<term>ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SESSION_TIMEOUT">
+<term>ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon</term>
+<listitem><para>
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_SHUTDOWN">
+<term>ZONEMGR_SHUTDOWN zone manager has shut down</term>
+<listitem><para>
+A debug message, output when the zone manager has shut down completely.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_STARTING">
+<term>ZONEMGR_STARTING zone manager starting</term>
+<listitem><para>
+A debug message output when the zone manager starts up.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_TIMER_THREAD_RUNNING">
+<term>ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running</term>
+<listitem><para>
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running.  It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer.  Please submit
+a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_FAIL">
+<term>ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager.  This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case).  Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_NOTIFIED">
+<term>ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager</term>
+<listitem><para>
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager.  This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case).  Please submit a bug report.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="ZONEMGR_UNKNOWN_ZONE_SUCCESS">
+<term>ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager</term>
 <listitem><para>
-A debug message noting that the server has received a response instead of a
-query and is ignoring it.
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager.  This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
 </para></listitem>
 </varlistentry>
       </variablelist>
diff --git a/ext/asio/asio/impl/error_code.ipp b/ext/asio/asio/impl/error_code.ipp
index ed37a17..218c09b 100644
--- a/ext/asio/asio/impl/error_code.ipp
+++ b/ext/asio/asio/impl/error_code.ipp
@@ -11,6 +11,9 @@
 #ifndef ASIO_IMPL_ERROR_CODE_IPP
 #define ASIO_IMPL_ERROR_CODE_IPP
 
+// strerror() needs <cstring>
+#include <cstring>
+
 #if defined(_MSC_VER) && (_MSC_VER >= 1200)
 # pragma once
 #endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
diff --git a/src/bin/auth/Makefile.am b/src/bin/auth/Makefile.am
index 49f9d9a..3d60432 100644
--- a/src/bin/auth/Makefile.am
+++ b/src/bin/auth/Makefile.am
@@ -50,12 +50,19 @@ b10_auth_SOURCES += command.cc command.h
 b10_auth_SOURCES += common.h common.cc
 b10_auth_SOURCES += statistics.cc statistics.h
 b10_auth_SOURCES += main.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+b10_auth_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
 
 nodist_b10_auth_SOURCES = auth_messages.h auth_messages.cc
 EXTRA_DIST += auth_messages.mes
 
 b10_auth_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+b10_auth_LDADD += $(top_builddir)/src/lib/util/libutil.la
 b10_auth_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
 b10_auth_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 b10_auth_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/bin/auth/auth.spec.pre.in b/src/bin/auth/auth.spec.pre.in
index d88ffb5..2ce044e 100644
--- a/src/bin/auth/auth.spec.pre.in
+++ b/src/bin/auth/auth.spec.pre.in
@@ -122,6 +122,24 @@
           }
         ]
       }
+    ],
+    "statistics": [
+      {
+        "item_name": "queries.tcp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "Queries TCP ",
+        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+      },
+      {
+        "item_name": "queries.udp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "Queries UDP",
+        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+      }
     ]
   }
 }
diff --git a/src/bin/auth/auth_config.cc b/src/bin/auth/auth_config.cc
index 2943cb5..d684c68 100644
--- a/src/bin/auth/auth_config.cc
+++ b/src/bin/auth/auth_config.cc
@@ -107,7 +107,7 @@ DatasourcesConfig::commit() {
     // server implementation details, and isn't scalable wrt the number of
     // data source types, and should eventually be improved.
     // Currently memory data source for class IN is the only possibility.
-    server_.setMemoryDataSrc(RRClass::IN(), AuthSrv::MemoryDataSrcPtr());
+    server_.setInMemoryClient(RRClass::IN(), AuthSrv::InMemoryClientPtr());
 
     BOOST_FOREACH(shared_ptr<AuthConfigParser> datasrc_config, datasources_) {
         datasrc_config->commit();
@@ -125,12 +125,12 @@ public:
     {}
     virtual void build(ConstElementPtr config_value);
     virtual void commit() {
-        server_.setMemoryDataSrc(rrclass_, memory_datasrc_);
+        server_.setInMemoryClient(rrclass_, memory_client_);
     }
 private:
     AuthSrv& server_;
     RRClass rrclass_;
-    AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+    AuthSrv::InMemoryClientPtr memory_client_;
 };
 
 void
@@ -143,8 +143,8 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
     // We'd eventually optimize building zones (in case of reloading) by
     // selectively loading fresh zones.  Right now we simply check the
     // RR class is supported by the server implementation.
-    server_.getMemoryDataSrc(rrclass_);
-    memory_datasrc_ = AuthSrv::MemoryDataSrcPtr(new MemoryDataSrc());
+    server_.getInMemoryClient(rrclass_);
+    memory_client_ = AuthSrv::InMemoryClientPtr(new InMemoryClient());
 
     ConstElementPtr zones_config = config_value->get("zones");
     if (!zones_config) {
@@ -163,9 +163,10 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
             isc_throw(AuthConfigError, "Missing zone file for zone: "
                       << origin->str());
         }
-        shared_ptr<MemoryZone> new_zone(new MemoryZone(rrclass_,
+        shared_ptr<InMemoryZoneFinder> zone_finder(new
+                                                   InMemoryZoneFinder(rrclass_,
             Name(origin->stringValue())));
-        const result::Result result = memory_datasrc_->addZone(new_zone);
+        const result::Result result = memory_client_->addZone(zone_finder);
         if (result == result::EXIST) {
             isc_throw(AuthConfigError, "zone "<< origin->str()
                       << " already exists");
@@ -177,7 +178,7 @@ MemoryDatasourceConfig::build(ConstElementPtr config_value) {
          * need the load method to be split into some kind of build and
          * commit/abort parts.
          */
-        new_zone->load(file->stringValue());
+        zone_finder->load(file->stringValue());
     }
 }
 
diff --git a/src/bin/auth/auth_log.h b/src/bin/auth/auth_log.h
index 5205624..e0cae0f 100644
--- a/src/bin/auth/auth_log.h
+++ b/src/bin/auth/auth_log.h
@@ -28,19 +28,19 @@ namespace auth {
 /// output.
 
 // Debug messages indicating normal startup are logged at this debug level.
-const int DBG_AUTH_START = 10;
+const int DBG_AUTH_START = DBGLVL_START_SHUT;
 
 // Debug level used to log setting information (such as configuration changes).
-const int DBG_AUTH_OPS = 30;
+const int DBG_AUTH_OPS = DBGLVL_COMMAND;
 
 // Trace detailed operations, including errors raised when processing invalid
 // packets.  (These are not logged at severities of WARN or higher for fear
 // that a set of deliberately invalid packets set to the authoritative server
 // could overwhelm the logging.)
-const int DBG_AUTH_DETAIL = 50;
+const int DBG_AUTH_DETAIL = DBGLVL_TRACE_BASIC;
 
 // This level is used to log the contents of packets received and sent.
-const int DBG_AUTH_MESSAGES = 70;
+const int DBG_AUTH_MESSAGES = DBGLVL_TRACE_DETAIL_DATA;
 
 /// Define the logger for the "auth" module part of b10-auth.  We could define
 /// a logger in each file, but we would want to define a common name to avoid
diff --git a/src/bin/auth/auth_messages.mes b/src/bin/auth/auth_messages.mes
index 2bb402c..4706690 100644
--- a/src/bin/auth/auth_messages.mes
+++ b/src/bin/auth/auth_messages.mes
@@ -63,7 +63,7 @@ datebase data source, listing the file that is being accessed.
 
 % AUTH_DNS_SERVICES_CREATED DNS services created
 This is a debug message indicating that the component that will handling
-incoming queries for the authoritiative server (DNSServices) has been
+incoming queries for the authoritative server (DNSServices) has been
 successfully created. It is issued during server startup is an indication
 that the initialization is proceeding normally.
 
@@ -74,7 +74,7 @@ reason for the failure is given in the message.) The server will drop the
 packet.
 
 % AUTH_LOAD_TSIG loading TSIG keys
-This is a debug message indicating that the authoritiative server
+This is a debug message indicating that the authoritative server
 has requested the keyring holding TSIG keys from the configuration
 database. It is issued during server startup is an indication that the
 initialization is proceeding normally.
@@ -141,8 +141,8 @@ encountered an internal error whilst processing a received packet:
 the cause of the error is included in the message.
 
 The server will return a SERVFAIL error code to the sender of the packet.
-However, this message indicates a potential error in the server.
-Please open a bug ticket for this issue.
+This message indicates a potential error in the server.  Please open a
+bug ticket for this issue.
 
 % AUTH_RECEIVED_COMMAND command '%1' received
 This is a debug message issued when the authoritative server has received
@@ -209,7 +209,7 @@ channel.  It is issued during server startup is an indication that the
 initialization is proceeding normally.
 
 % AUTH_STATS_COMMS communication error in sending statistics data: %1
-An error was encountered when the authoritiative server tried to send data
+An error was encountered when the authoritative server tried to send data
 to the statistics daemon. The message includes additional information
 describing the reason for the failure.
 
@@ -257,4 +257,6 @@ request. The zone manager component has been informed of the request,
 but has returned an error response (which is included in the message). The
 NOTIFY request will not be honored.
 
-
+% AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index 7d3589a..da05e48 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -91,9 +91,9 @@ public:
     bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
                             OutputBufferPtr buffer,
                             auto_ptr<TSIGContext> tsig_context);
-    bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
-                          OutputBufferPtr buffer,
-                          auto_ptr<TSIGContext> tsig_context);
+    bool processXfrQuery(const IOMessage& io_message, MessagePtr message,
+                         OutputBufferPtr buffer,
+                         auto_ptr<TSIGContext> tsig_context);
     bool processNotify(const IOMessage& io_message, MessagePtr message,
                        OutputBufferPtr buffer,
                        auto_ptr<TSIGContext> tsig_context);
@@ -108,8 +108,8 @@ public:
     AbstractSession* xfrin_session_;
 
     /// In-memory data source.  Currently class IN only for simplicity.
-    const RRClass memory_datasrc_class_;
-    AuthSrv::MemoryDataSrcPtr memory_datasrc_;
+    const RRClass memory_client_class_;
+    AuthSrv::InMemoryClientPtr memory_client_;
 
     /// Hot spot cache
     isc::datasrc::HotCache cache_;
@@ -125,6 +125,10 @@ public:
 
     /// The TSIG keyring
     const shared_ptr<TSIGKeyRing>* keyring_;
+
+    /// Bind the ModuleSpec object in config_session_ with
+    /// isc:config::ModuleSpec::validateStatistics.
+    void registerStatisticsValidator();
 private:
     std::string db_file_;
 
@@ -139,13 +143,16 @@ private:
 
     /// Increment query counter
     void incCounter(const int protocol);
+
+    // validateStatistics
+    bool validateStatistics(isc::data::ConstElementPtr data) const;
 };
 
 AuthSrvImpl::AuthSrvImpl(const bool use_cache,
                          AbstractXfroutClient& xfrout_client) :
     config_session_(NULL),
     xfrin_session_(NULL),
-    memory_datasrc_class_(RRClass::IN()),
+    memory_client_class_(RRClass::IN()),
     statistics_timer_(io_service_),
     counters_(),
     keyring_(NULL),
@@ -212,8 +219,9 @@ class ConfigChecker : public SimpleCallback {
 public:
     ConfigChecker(AuthSrv* srv) : server_(srv) {}
     virtual void operator()(const IOMessage&) const {
-        if (server_->getConfigSession()->hasQueuedMsgs()) {
-            server_->getConfigSession()->checkCommand();
+        ModuleCCSession* cfg_session = server_->getConfigSession();
+        if (cfg_session != NULL && cfg_session->hasQueuedMsgs()) {
+            cfg_session->checkCommand();
         }
     }
 private:
@@ -317,6 +325,7 @@ AuthSrv::setXfrinSession(AbstractSession* xfrin_session) {
 void
 AuthSrv::setConfigSession(ModuleCCSession* config_session) {
     impl_->config_session_ = config_session;
+    impl_->registerStatisticsValidator();
 }
 
 void
@@ -329,34 +338,34 @@ AuthSrv::getConfigSession() const {
     return (impl_->config_session_);
 }
 
-AuthSrv::MemoryDataSrcPtr
-AuthSrv::getMemoryDataSrc(const RRClass& rrclass) {
+AuthSrv::InMemoryClientPtr
+AuthSrv::getInMemoryClient(const RRClass& rrclass) {
     // XXX: for simplicity, we only support the IN class right now.
-    if (rrclass != impl_->memory_datasrc_class_) {
+    if (rrclass != impl_->memory_client_class_) {
         isc_throw(InvalidParameter,
                   "Memory data source is not supported for RR class "
                   << rrclass);
     }
-    return (impl_->memory_datasrc_);
+    return (impl_->memory_client_);
 }
 
 void
-AuthSrv::setMemoryDataSrc(const isc::dns::RRClass& rrclass,
-                          MemoryDataSrcPtr memory_datasrc)
+AuthSrv::setInMemoryClient(const isc::dns::RRClass& rrclass,
+                           InMemoryClientPtr memory_client)
 {
     // XXX: see above
-    if (rrclass != impl_->memory_datasrc_class_) {
+    if (rrclass != impl_->memory_client_class_) {
         isc_throw(InvalidParameter,
                   "Memory data source is not supported for RR class "
                   << rrclass);
-    } else if (!impl_->memory_datasrc_ && memory_datasrc) {
+    } else if (!impl_->memory_client_ && memory_client) {
         LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_ENABLED)
                   .arg(rrclass);
-    } else if (impl_->memory_datasrc_ && !memory_datasrc) {
+    } else if (impl_->memory_client_ && !memory_client) {
         LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_MEM_DATASRC_DISABLED)
                   .arg(rrclass);
     }
-    impl_->memory_datasrc_ = memory_datasrc;
+    impl_->memory_client_ = memory_client;
 }
 
 uint32_t
@@ -464,10 +473,11 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
         ConstQuestionPtr question = *message->beginQuestion();
         const RRType &qtype = question->getType();
         if (qtype == RRType::AXFR()) {
-            sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
-                                                 tsig_context);
+            sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+                                                tsig_context);
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
+            sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+                                                tsig_context);
         } else {
             sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
                                                    tsig_context);
@@ -505,10 +515,10 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
         // If a memory data source is configured call the separate
         // Query::process()
         const ConstQuestionPtr question = *message->beginQuestion();
-        if (memory_datasrc_ && memory_datasrc_class_ == question->getClass()) {
+        if (memory_client_ && memory_client_class_ == question->getClass()) {
             const RRType& qtype = question->getType();
             const Name& qname = question->getName();
-            auth::Query(*memory_datasrc_, qname, qtype, *message).process();
+            auth::Query(*memory_client_, qname, qtype, *message).process();
         } else {
             datasrc::Query query(*message, cache_, dnssec_ok);
             data_sources_.doQuery(query);
@@ -535,9 +545,9 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
 }
 
 bool
-AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
-                              OutputBufferPtr buffer,
-                              auto_ptr<TSIGContext> tsig_context)
+AuthSrvImpl::processXfrQuery(const IOMessage& io_message, MessagePtr message,
+                             OutputBufferPtr buffer,
+                             auto_ptr<TSIGContext> tsig_context)
 {
     // Increment query counter.
     incCounter(io_message.getSocket().getProtocol());
@@ -670,6 +680,22 @@ AuthSrvImpl::incCounter(const int protocol) {
     }
 }
 
+void
+AuthSrvImpl::registerStatisticsValidator() {
+    counters_.registerStatisticsValidator(
+        boost::bind(&AuthSrvImpl::validateStatistics, this, _1));
+}
+
+bool
+AuthSrvImpl::validateStatistics(isc::data::ConstElementPtr data) const {
+    if (config_session_ == NULL) {
+        return (false);
+    }
+    return (
+        config_session_->getModuleSpec().validateStatistics(
+            data, true));
+}
+
 ConstElementPtr
 AuthSrvImpl::setDbFile(ConstElementPtr config) {
     ConstElementPtr answer = isc::config::createAnswer();
diff --git a/src/bin/auth/auth_srv.h b/src/bin/auth/auth_srv.h
index f075ab2..a50e427 100644
--- a/src/bin/auth/auth_srv.h
+++ b/src/bin/auth/auth_srv.h
@@ -17,7 +17,7 @@
 
 #include <string>
 
-// For MemoryDataSrcPtr below.  This should be a temporary definition until
+// For InMemoryClientPtr below.  This should be a temporary definition until
 // we reorganize the data source framework.
 #include <boost/shared_ptr.hpp>
 
@@ -39,7 +39,7 @@
 
 namespace isc {
 namespace datasrc {
-class MemoryDataSrc;
+class InMemoryClient;
 }
 namespace xfr {
 class AbstractXfroutClient;
@@ -133,7 +133,7 @@ public:
     /// If there is a data source installed, it will be replaced with the
     /// new one.
     ///
-    /// In the current implementation, the SQLite data source and MemoryDataSrc
+    /// In the current implementation, the SQLite data source and InMemoryClient
     /// are assumed.
     /// We can enable memory data source and get the path of SQLite database by
     /// the \c config parameter.  If we disabled memory data source, the SQLite
@@ -233,16 +233,16 @@ public:
     ///
     void setXfrinSession(isc::cc::AbstractSession* xfrin_session);
 
-    /// A shared pointer type for \c MemoryDataSrc.
+    /// A shared pointer type for \c InMemoryClient.
     ///
     /// This is defined inside the \c AuthSrv class as it's supposed to be
     /// a short term interface until we integrate the in-memory and other
     /// data source frameworks.
-    typedef boost::shared_ptr<isc::datasrc::MemoryDataSrc> MemoryDataSrcPtr;
+    typedef boost::shared_ptr<isc::datasrc::InMemoryClient> InMemoryClientPtr;
 
-    /// An immutable shared pointer type for \c MemoryDataSrc.
-    typedef boost::shared_ptr<const isc::datasrc::MemoryDataSrc>
-    ConstMemoryDataSrcPtr;
+    /// An immutable shared pointer type for \c InMemoryClient.
+    typedef boost::shared_ptr<const isc::datasrc::InMemoryClient>
+    ConstInMemoryClientPtr;
 
     /// Returns the in-memory data source configured for the \c AuthSrv,
     /// if any.
@@ -260,11 +260,11 @@ public:
     /// \param rrclass The RR class of the requested in-memory data source.
     /// \return A pointer to the in-memory data source, if configured;
     /// otherwise NULL.
-    MemoryDataSrcPtr getMemoryDataSrc(const isc::dns::RRClass& rrclass);
+    InMemoryClientPtr getInMemoryClient(const isc::dns::RRClass& rrclass);
 
     /// Sets or replaces the in-memory data source of the specified RR class.
     ///
-    /// As noted in \c getMemoryDataSrc(), some RR classes may not be
+    /// As noted in \c getInMemoryClient(), some RR classes may not be
     /// supported, in which case an exception of class \c InvalidParameter
     /// will be thrown.
     /// This method never throws an exception otherwise.
@@ -275,9 +275,9 @@ public:
     /// in-memory data source.
     ///
     /// \param rrclass The RR class of the in-memory data source to be set.
-    /// \param memory_datasrc A (shared) pointer to \c MemoryDataSrc to be set.
-    void setMemoryDataSrc(const isc::dns::RRClass& rrclass,
-                          MemoryDataSrcPtr memory_datasrc);
+    /// \param memory_datasrc A (shared) pointer to \c InMemoryClient to be set.
+    void setInMemoryClient(const isc::dns::RRClass& rrclass,
+                           InMemoryClientPtr memory_client);
 
     /// \brief Set the communication session with Statistics.
     ///
diff --git a/src/bin/auth/b10-auth.8 b/src/bin/auth/b10-auth.8
index 0356683..aedadee 100644
--- a/src/bin/auth/b10-auth.8
+++ b/src/bin/auth/b10-auth.8
@@ -2,12 +2,12 @@
 .\"     Title: b10-auth
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: March 8, 2011
+.\"      Date: August 11, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "B10\-AUTH" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-AUTH" "8" "August 11, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -70,18 +70,6 @@ defines the path to the SQLite3 zone file when using the sqlite datasource\&. Th
 /usr/local/var/bind10\-devel/zone\&.sqlite3\&.
 .PP
 
-\fIlisten_on\fR
-is a list of addresses and ports for
-\fBb10\-auth\fR
-to listen on\&. The list items are the
-\fIaddress\fR
-string and
-\fIport\fR
-number\&. By default,
-\fBb10\-auth\fR
-listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
-.PP
-
 \fIdatasources\fR
 configures data sources\&. The list items include:
 \fItype\fR
@@ -114,6 +102,18 @@ In this development version, currently this is only used for the memory data sou
 .RE
 .PP
 
+\fIlisten_on\fR
+is a list of addresses and ports for
+\fBb10\-auth\fR
+to listen on\&. The list items are the
+\fIaddress\fR
+string and
+\fIport\fR
+number\&. By default,
+\fBb10\-auth\fR
+listens on port 53 on the IPv6 (::) and IPv4 (0\&.0\&.0\&.0) wildcard addresses\&.
+.PP
+
 \fIstatistics\-interval\fR
 is the timer interval in seconds for
 \fBb10\-auth\fR
@@ -164,6 +164,25 @@ immediately\&.
 \fBshutdown\fR
 exits
 \fBb10\-auth\fR\&. (Note that the BIND 10 boss process will restart this service\&.)
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+auth\&.queries\&.tcp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over TCP since startup\&.
+.RE
+.PP
+auth\&.queries\&.udp
+.RS 4
+Total count of queries received by the
+\fBb10\-auth\fR
+server over UDP since startup\&.
+.RE
 .SH "FILES"
 .PP
 
diff --git a/src/bin/auth/b10-auth.xml b/src/bin/auth/b10-auth.xml
index 2b53394..636f437 100644
--- a/src/bin/auth/b10-auth.xml
+++ b/src/bin/auth/b10-auth.xml
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>March 8, 2011</date>
+    <date>August 11, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -132,15 +132,6 @@
     </para>
 
     <para>
-      <varname>listen_on</varname> is a list of addresses and ports for
-      <command>b10-auth</command> to listen on.
-      The list items are the <varname>address</varname> string
-      and <varname>port</varname> number.
-      By default, <command>b10-auth</command> listens on port 53
-      on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
-    </para>
-
-    <para>
       <varname>datasources</varname> configures data sources.
       The list items include:
       <varname>type</varname> to optionally choose the data source type
@@ -165,6 +156,15 @@
     </para>
 
     <para>
+      <varname>listen_on</varname> is a list of addresses and ports for
+      <command>b10-auth</command> to listen on.
+      The list items are the <varname>address</varname> string
+      and <varname>port</varname> number.
+      By default, <command>b10-auth</command> listens on port 53
+      on the IPv6 (::) and IPv4 (0.0.0.0) wildcard addresses.
+    </para>
+
+    <para>
       <varname>statistics-interval</varname> is the timer interval
       in seconds for <command>b10-auth</command> to share its
       statistics information to
@@ -209,6 +209,34 @@
   </refsect1>
 
   <refsect1>
+    <title>STATISTICS DATA</title>
+
+    <para>
+      The statistics data collected by the <command>b10-stats</command>
+      daemon include:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term>auth.queries.tcp</term>
+        <listitem><simpara>Total count of queries received by the
+          <command>b10-auth</command> server over TCP since startup.
+        </simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>auth.queries.udp</term>
+        <listitem><simpara>Total count of queries received by the
+          <command>b10-auth</command> server over UDP since startup.
+        </simpara></listitem>
+      </varlistentry>
+
+    </variablelist>
+
+  </refsect1>
+
+  <refsect1>
     <title>FILES</title>
     <para>
       <filename>/usr/local/var/bind10-devel/zone.sqlite3</filename>
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 0ad59ea..fb348bb 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -13,10 +13,17 @@ query_bench_SOURCES += ../auth_srv.h ../auth_srv.cc
 query_bench_SOURCES += ../auth_config.h ../auth_config.cc
 query_bench_SOURCES += ../statistics.h ../statistics.cc
 query_bench_SOURCES += ../auth_log.h ../auth_log.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+query_bench_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
 
 nodist_query_bench_SOURCES = ../auth_messages.h ../auth_messages.cc
 
 query_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+query_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
 query_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 query_bench_LDADD += $(top_builddir)/src/lib/bench/libbench.la
 query_bench_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
diff --git a/src/bin/auth/command.cc b/src/bin/auth/command.cc
index fe3d729..940d57b 100644
--- a/src/bin/auth/command.cc
+++ b/src/bin/auth/command.cc
@@ -136,19 +136,21 @@ public:
         // that doesn't block other server operations.
         // TODO: we may (should?) want to check the "last load time" and
         // the timestamp of the file and skip loading if the file isn't newer.
-        shared_ptr<MemoryZone> newzone(new MemoryZone(oldzone->getClass(),
-                                                      oldzone->getOrigin()));
-        newzone->load(oldzone->getFileName());
-        oldzone->swap(*newzone);
+        shared_ptr<InMemoryZoneFinder> zone_finder(
+            new InMemoryZoneFinder(old_zone_finder->getClass(),
+                                   old_zone_finder->getOrigin()));
+        zone_finder->load(old_zone_finder->getFileName());
+        old_zone_finder->swap(*zone_finder);
         LOG_DEBUG(auth_logger, DBG_AUTH_OPS, AUTH_LOAD_ZONE)
-                  .arg(newzone->getOrigin()).arg(newzone->getClass());
+                  .arg(zone_finder->getOrigin()).arg(zone_finder->getClass());
     }
 
 private:
-    shared_ptr<MemoryZone> oldzone; // zone to be updated with the new file.
+    // zone finder to be updated with the new file.
+    shared_ptr<InMemoryZoneFinder> old_zone_finder;
 
     // A helper private method to parse and validate command parameters.
-    // On success, it sets 'oldzone' to the zone to be updated.
+    // On success, it sets 'old_zone_finder' to the zone to be updated.
     // It returns true if everything is okay; and false if the command is
     // valid but there's no need for further process.
     bool validate(AuthSrv& server, isc::data::ConstElementPtr args) {
@@ -176,7 +178,7 @@ private:
         const RRClass zone_class = class_elem ?
             RRClass(class_elem->stringValue()) : RRClass::IN();
 
-        AuthSrv::MemoryDataSrcPtr datasrc(server.getMemoryDataSrc(zone_class));
+        AuthSrv::InMemoryClientPtr datasrc(server.getInMemoryClient(zone_class));
         if (datasrc == NULL) {
             isc_throw(AuthCommandError, "Memory data source is disabled");
         }
@@ -188,13 +190,14 @@ private:
         const Name origin(origin_elem->stringValue());
 
         // Get the current zone
-        const MemoryDataSrc::FindResult result = datasrc->findZone(origin);
+        const InMemoryClient::FindResult result = datasrc->findZone(origin);
         if (result.code != result::SUCCESS) {
             isc_throw(AuthCommandError, "Zone " << origin <<
                       " is not found in data source");
         }
 
-        oldzone = boost::dynamic_pointer_cast<MemoryZone>(result.zone);
+        old_zone_finder = boost::dynamic_pointer_cast<InMemoryZoneFinder>(
+            result.zone_finder);
 
         return (true);
     }
diff --git a/src/bin/auth/common.cc b/src/bin/auth/common.cc
index 35381a1..a7031f3 100644
--- a/src/bin/auth/common.cc
+++ b/src/bin/auth/common.cc
@@ -12,22 +12,25 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <string>
+
 #include <auth/common.h>
 #include <auth/spec_config.h>
 #include <stdlib.h>
 
 using std::string;
 
-string getXfroutSocketPath() {
+string
+getXfroutSocketPath() {
     if (getenv("B10_FROM_BUILD") != NULL) {
-        if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) {
+        if (getenv("B10_FROM_SOURCE_LOCALSTATEDIR") != NULL) {
             return (string(getenv("B10_FROM_SOURCE_LOCALSTATEDIR")) +
                     "/auth_xfrout_conn");
         } else {
             return (string(getenv("B10_FROM_BUILD")) + "/auth_xfrout_conn");
         }
     } else {
-        if (getenv("BIND10_XFROUT_SOCKET_FILE")) {
+        if (getenv("BIND10_XFROUT_SOCKET_FILE") != NULL) {
             return (getenv("BIND10_XFROUT_SOCKET_FILE"));
         } else {
             return (UNIX_SOCKET_FILE);
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index 323f890..f159262 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -12,6 +12,7 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <algorithm>            // for std::max
 #include <vector>
 #include <boost/foreach.hpp>
 
@@ -19,7 +20,7 @@
 #include <dns/rcode.h>
 #include <dns/rdataclass.h>
 
-#include <datasrc/memory_datasrc.h>
+#include <datasrc/client.h>
 
 #include <auth/query.h>
 
@@ -31,24 +32,24 @@ namespace isc {
 namespace auth {
 
 void
-Query::getAdditional(const Zone& zone, const RRset& rrset) const {
+Query::addAdditional(ZoneFinder& zone, const RRset& rrset) {
     RdataIteratorPtr rdata_iterator(rrset.getRdataIterator());
     for (; !rdata_iterator->isLast(); rdata_iterator->next()) {
         const Rdata& rdata(rdata_iterator->getCurrent());
         if (rrset.getType() == RRType::NS()) {
             // Need to perform the search in the "GLUE OK" mode.
             const generic::NS& ns = dynamic_cast<const generic::NS&>(rdata);
-            findAddrs(zone, ns.getNSName(), Zone::FIND_GLUE_OK);
+            addAdditionalAddrs(zone, ns.getNSName(), ZoneFinder::FIND_GLUE_OK);
         } else if (rrset.getType() == RRType::MX()) {
             const generic::MX& mx(dynamic_cast<const generic::MX&>(rdata));
-            findAddrs(zone, mx.getMXName());
+            addAdditionalAddrs(zone, mx.getMXName());
         }
     }
 }
 
 void
-Query::findAddrs(const Zone& zone, const Name& qname,
-                 const Zone::FindOptions options) const
+Query::addAdditionalAddrs(ZoneFinder& zone, const Name& qname,
+                          const ZoneFinder::FindOptions options)
 {
     // Out of zone name
     NameComparisonResult result = zone.getOrigin().compare(qname);
@@ -66,32 +67,33 @@ Query::findAddrs(const Zone& zone, const Name& qname,
 
     // Find A rrset
     if (qname_ != qname || qtype_ != RRType::A()) {
-        Zone::FindResult a_result = zone.find(qname, RRType::A(), NULL,
-                                              options);
-        if (a_result.code == Zone::SUCCESS) {
+        ZoneFinder::FindResult a_result = zone.find(qname, RRType::A(), NULL,
+                                                    options | dnssec_opt_);
+        if (a_result.code == ZoneFinder::SUCCESS) {
             response_.addRRset(Message::SECTION_ADDITIONAL,
-                    boost::const_pointer_cast<RRset>(a_result.rrset));
+                    boost::const_pointer_cast<RRset>(a_result.rrset), dnssec_);
         }
     }
 
     // Find AAAA rrset
     if (qname_ != qname || qtype_ != RRType::AAAA()) {
-        Zone::FindResult aaaa_result =
-            zone.find(qname, RRType::AAAA(), NULL, options);
-        if (aaaa_result.code == Zone::SUCCESS) {
+        ZoneFinder::FindResult aaaa_result =
+            zone.find(qname, RRType::AAAA(), NULL, options | dnssec_opt_);
+        if (aaaa_result.code == ZoneFinder::SUCCESS) {
             response_.addRRset(Message::SECTION_ADDITIONAL,
-                    boost::const_pointer_cast<RRset>(aaaa_result.rrset));
+                    boost::const_pointer_cast<RRset>(aaaa_result.rrset),
+                    dnssec_);
         }
     }
 }
 
 void
-Query::putSOA(const Zone& zone) const {
-    Zone::FindResult soa_result(zone.find(zone.getOrigin(),
-        RRType::SOA()));
-    if (soa_result.code != Zone::SUCCESS) {
+Query::addSOA(ZoneFinder& finder) {
+    ZoneFinder::FindResult soa_result(finder.find(finder.getOrigin(),
+        RRType::SOA(), NULL, dnssec_opt_));
+    if (soa_result.code != ZoneFinder::SUCCESS) {
         isc_throw(NoSOA, "There's no SOA record in zone " <<
-            zone.getOrigin().toText());
+            finder.getOrigin().toText());
     } else {
         /*
          * FIXME:
@@ -99,34 +101,140 @@ Query::putSOA(const Zone& zone) const {
          * to insist.
          */
         response_.addRRset(Message::SECTION_AUTHORITY,
-            boost::const_pointer_cast<RRset>(soa_result.rrset));
+            boost::const_pointer_cast<RRset>(soa_result.rrset), dnssec_);
     }
 }
 
+// Note: unless the data source client implementation or the zone content
+// is broken, 'nsec' should be a valid NSEC RR.  Likewise, the call to
+// find() in this method should result in NXDOMAIN and an NSEC RR that proves
+// the non existent of matching wildcard.  If these assumptions aren't met
+// due to a buggy data source implementation or a broken zone, we'll let
+// underlying libdns++ modules throw an exception, which would result in
+// either an SERVFAIL response or just ignoring the query.  We at least prevent
+// a complete crash due to such broken behavior.
 void
-Query::getAuthAdditional(const Zone& zone) const {
+Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+    if (nsec->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
+    }
+
+    // Add the NSEC proving NXDOMAIN to the authority section.
+    response_.addRRset(Message::SECTION_AUTHORITY,
+                       boost::const_pointer_cast<RRset>(nsec), dnssec_);
+
+    // Next, identify the best possible wildcard name that would match
+    // the query name.  It's the longer common suffix with the qname
+    // between the owner or the next domain of the NSEC that proves NXDOMAIN,
+    // prefixed by the wildcard label, "*".  For example, for query name
+    // a.b.example.com, if the NXDOMAIN NSEC is
+    // b.example.com. NSEC c.example.com., the longer suffix is b.example.com.,
+    // and the best possible wildcard is *.b.example.com.  If the NXDOMAIN
+    // NSEC is a.example.com. NSEC c.b.example.com., the longer suffix
+    // is the next domain of the NSEC, and we get the same wildcard name.
+    const int qlabels = qname_.getLabelCount();
+    const int olabels = qname_.compare(nsec->getName()).getCommonLabels();
+    const int nlabels = qname_.compare(
+        dynamic_cast<const generic::NSEC&>(nsec->getRdataIterator()->
+                                           getCurrent()).
+        getNextName()).getCommonLabels();
+    const int common_labels = std::max(olabels, nlabels);
+    const Name wildname(Name("*").concatenate(qname_.split(qlabels -
+                                                           common_labels)));
+
+    // Confirm the wildcard doesn't exist (this should result in NXDOMAIN;
+    // otherwise we shouldn't have got NXDOMAIN for the original query in
+    // the first place).
+    const ZoneFinder::FindResult fresult = finder.find(wildname,
+                                                       RRType::NSEC(), NULL,
+                                                       dnssec_opt_);
+    if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+        fresult.rrset->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
+    }
+
+    // Add the (no-) wildcard proof only when it's different from the NSEC
+    // that proves NXDOMAIN; sometimes they can be the same.
+    // Note: name comparison is relatively expensive.  When we are at the
+    // stage of performance optimization, we should consider optimizing this
+    // for some optimized data source implementations.
+    if (nsec->getName() != fresult.rrset->getName()) {
+        response_.addRRset(Message::SECTION_AUTHORITY,
+                           boost::const_pointer_cast<RRset>(fresult.rrset),
+                           dnssec_);
+    }
+}
+
+void
+Query::addWildcardProof(ZoneFinder& finder) {
+    // The query name shouldn't exist in the zone if there were no wildcard
+    // substitution.  Confirm that by specifying NO_WILDCARD.  It should result
+    // in NXDOMAIN and an NSEC RR that proves it should be returned.
+    const ZoneFinder::FindResult fresult =
+        finder.find(qname_, RRType::NSEC(), NULL,
+                    dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+    if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+        fresult.rrset->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "Unexpected result for wildcard proof");
+    }
+    response_.addRRset(Message::SECTION_AUTHORITY,
+                       boost::const_pointer_cast<RRset>(fresult.rrset),
+                       dnssec_);
+}
+
+void
+Query::addWildcardNXRRSETProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+    // There should be one NSEC RR which was found in the zone to prove
+    // that there is not matched <QNAME,QTYPE> via wildcard expansion.
+    if (nsec->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "NSEC for WILDCARD_NXRRSET is empty");
+    }
+    // Add this NSEC RR to authority section.
+    response_.addRRset(Message::SECTION_AUTHORITY,
+                      boost::const_pointer_cast<RRset>(nsec), dnssec_);
+    
+    const ZoneFinder::FindResult fresult =
+        finder.find(qname_, RRType::NSEC(), NULL,
+                    dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+    if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+        fresult.rrset->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "Unexpected result for no match QNAME proof");
+    }
+   
+    if (nsec->getName() != fresult.rrset->getName()) {
+        // one NSEC RR proves wildcard_nxrrset that no matched QNAME.
+        response_.addRRset(Message::SECTION_AUTHORITY,
+                           boost::const_pointer_cast<RRset>(fresult.rrset),
+                           dnssec_);
+    }
+}
+    
+void
+Query::addAuthAdditional(ZoneFinder& finder) {
     // Fill in authority and addtional sections.
-    Zone::FindResult ns_result = zone.find(zone.getOrigin(), RRType::NS());
+    ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
+                                                   RRType::NS(), NULL,
+                                                   dnssec_opt_);
     // zone origin name should have NS records
-    if (ns_result.code != Zone::SUCCESS) {
+    if (ns_result.code != ZoneFinder::SUCCESS) {
         isc_throw(NoApexNS, "There's no apex NS records in zone " <<
-                zone.getOrigin().toText());
+                finder.getOrigin().toText());
     } else {
         response_.addRRset(Message::SECTION_AUTHORITY,
-            boost::const_pointer_cast<RRset>(ns_result.rrset));
+            boost::const_pointer_cast<RRset>(ns_result.rrset), dnssec_);
         // Handle additional for authority section
-        getAdditional(zone, *ns_result.rrset);
+        addAdditional(finder, *ns_result.rrset);
     }
 }
 
 void
-Query::process() const {
+Query::process() {
     bool keep_doing = true;
     const bool qtype_is_any = (qtype_ == RRType::ANY());
 
     response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
-    const MemoryDataSrc::FindResult result =
-        memory_datasrc_.findZone(qname_);
+    const DataSourceClient::FindResult result =
+        datasrc_client_.findZone(qname_);
 
     // If we have no matching authoritative zone for the query name, return
     // REFUSED.  In short, this is to be compatible with BIND 9, but the
@@ -138,6 +246,7 @@ Query::process() const {
         response_.setRcode(Rcode::REFUSED());
         return;
     }
+    ZoneFinder& zfinder = *result.zone_finder;
 
     // Found a zone which is the nearest ancestor to QNAME, set the AA bit
     response_.setHeaderFlag(Message::HEADERFLAG_AA);
@@ -145,14 +254,14 @@ Query::process() const {
     while (keep_doing) {
         keep_doing = false;
         std::auto_ptr<RRsetList> target(qtype_is_any ? new RRsetList : NULL);
-        const Zone::FindResult db_result(result.zone->find(qname_, qtype_,
-            target.get()));
-
+        const ZoneFinder::FindResult db_result(
+            zfinder.find(qname_, qtype_, target.get(), dnssec_opt_));
         switch (db_result.code) {
-            case Zone::DNAME: {
+            case ZoneFinder::DNAME: {
                 // First, put the dname into the answer
                 response_.addRRset(Message::SECTION_ANSWER,
-                    boost::const_pointer_cast<RRset>(db_result.rrset));
+                    boost::const_pointer_cast<RRset>(db_result.rrset),
+                    dnssec_);
                 /*
                  * Empty DNAME should never get in, as it is impossible to
                  * create one in master file.
@@ -188,10 +297,11 @@ Query::process() const {
                     qname_.getLabelCount() -
                     db_result.rrset->getName().getLabelCount()).
                     concatenate(dname.getDname())));
-                response_.addRRset(Message::SECTION_ANSWER, cname);
+                response_.addRRset(Message::SECTION_ANSWER, cname, dnssec_);
                 break;
             }
-            case Zone::CNAME:
+            case ZoneFinder::CNAME:
+            case ZoneFinder::WILDCARD_CNAME:
                 /*
                  * We don't do chaining yet. Therefore handling a CNAME is
                  * mostly the same as handling SUCCESS, but we didn't get
@@ -202,48 +312,84 @@ Query::process() const {
                  * So, just put it there.
                  */
                 response_.addRRset(Message::SECTION_ANSWER,
-                    boost::const_pointer_cast<RRset>(db_result.rrset));
+                    boost::const_pointer_cast<RRset>(db_result.rrset),
+                    dnssec_);
+
+                // If the answer is a result of wildcard substitution,
+                // add a proof that there's no closer name.
+                if (dnssec_ && db_result.code == ZoneFinder::WILDCARD_CNAME) {
+                    addWildcardProof(*result.zone_finder);
+                }
                 break;
-            case Zone::SUCCESS:
+            case ZoneFinder::SUCCESS:
+            case ZoneFinder::WILDCARD:
                 if (qtype_is_any) {
                     // If quety type is ANY, insert all RRs under the domain
                     // into answer section.
                     BOOST_FOREACH(RRsetPtr rrset, *target) {
-                        response_.addRRset(Message::SECTION_ANSWER, rrset);
+                        response_.addRRset(Message::SECTION_ANSWER, rrset,
+                                           dnssec_);
                         // Handle additional for answer section
-                        getAdditional(*result.zone, *rrset.get());
+                        addAdditional(*result.zone_finder, *rrset.get());
                     }
                 } else {
                     response_.addRRset(Message::SECTION_ANSWER,
-                        boost::const_pointer_cast<RRset>(db_result.rrset));
+                        boost::const_pointer_cast<RRset>(db_result.rrset),
+                        dnssec_);
                     // Handle additional for answer section
-                    getAdditional(*result.zone, *db_result.rrset);
+                    addAdditional(*result.zone_finder, *db_result.rrset);
                 }
                 // If apex NS records haven't been provided in the answer
                 // section, insert apex NS records into the authority section
                 // and AAAA/A RRS of each of the NS RDATA into the additional
                 // section.
-                if (qname_ != result.zone->getOrigin() ||
-                    db_result.code != Zone::SUCCESS ||
+                if (qname_ != result.zone_finder->getOrigin() ||
+                    db_result.code != ZoneFinder::SUCCESS ||
                     (qtype_ != RRType::NS() && !qtype_is_any))
                 {
-                    getAuthAdditional(*result.zone);
+                    addAuthAdditional(*result.zone_finder);
+                }
+
+                // If the answer is a result of wildcard substitution,
+                // add a proof that there's no closer name.
+                if (dnssec_ && db_result.code == ZoneFinder::WILDCARD) {
+                    addWildcardProof(*result.zone_finder);
                 }
                 break;
-            case Zone::DELEGATION:
+            case ZoneFinder::DELEGATION:
                 response_.setHeaderFlag(Message::HEADERFLAG_AA, false);
                 response_.addRRset(Message::SECTION_AUTHORITY,
-                    boost::const_pointer_cast<RRset>(db_result.rrset));
-                getAdditional(*result.zone, *db_result.rrset);
+                    boost::const_pointer_cast<RRset>(db_result.rrset),
+                    dnssec_);
+                addAdditional(*result.zone_finder, *db_result.rrset);
                 break;
-            case Zone::NXDOMAIN:
-                // Just empty answer with SOA in authority section
+            case ZoneFinder::NXDOMAIN:
                 response_.setRcode(Rcode::NXDOMAIN());
-                putSOA(*result.zone);
+                addSOA(*result.zone_finder);
+                if (dnssec_ && db_result.rrset) {
+                    addNXDOMAINProof(zfinder, db_result.rrset);
+                }
+                break;
+            case ZoneFinder::NXRRSET:
+                addSOA(*result.zone_finder);
+                if (dnssec_ && db_result.rrset) {
+                    response_.addRRset(Message::SECTION_AUTHORITY,
+                                       boost::const_pointer_cast<RRset>(
+                                           db_result.rrset),
+                                       dnssec_);
+                }
+                break;
+            case ZoneFinder::WILDCARD_NXRRSET:
+                addSOA(*result.zone_finder);
+                if (dnssec_ && db_result.rrset) {
+                    addWildcardNXRRSETProof(zfinder, db_result.rrset);
+                }
                 break;
-            case Zone::NXRRSET:
-                // Just empty answer with SOA in authority section
-                putSOA(*result.zone);
+            default:
+                // This is basically a bug of the data source implementation,
+                // but could also happen in the middle of development where
+                // we try to add a new result code.
+                isc_throw(isc::NotImplemented, "Unknown result code");
                 break;
         }
     }
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index e0c6323..43a8b6b 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -26,7 +26,7 @@ class RRset;
 }
 
 namespace datasrc {
-class MemoryDataSrc;
+class DataSourceClient;
 }
 
 namespace auth {
@@ -36,10 +36,8 @@ namespace auth {
 ///
 /// Many of the design details for this class are still in flux.
 /// We'll revisit and update them as we add more functionality, for example:
-/// - memory_datasrc parameter of the constructor.  It is a data source that
-///   uses in memory dedicated backend.
 /// - as a related point, we may have to pass the RR class of the query.
-///   in the initial implementation the RR class is an attribute of memory
+///   in the initial implementation the RR class is an attribute of
 ///   datasource and omitted.  It's not clear if this assumption holds with
 ///   generic data sources.  On the other hand, it will help keep
 ///   implementation simpler, and we might rather want to modify the design
@@ -51,7 +49,7 @@ namespace auth {
 ///   separate attribute setter.
 /// - likewise, we'll eventually need to do per zone access control, for which
 ///   we need querier's information such as its IP address.
-/// - memory_datasrc and response may better be parameters to process() instead
+/// - datasrc_client and response may better be parameters to process() instead
 ///   of the constructor.
 ///
 /// <b>Note:</b> The class name is intentionally the same as the one used in
@@ -71,10 +69,33 @@ private:
     /// Adds a SOA of the zone into the authority zone of response_.
     /// Can throw NoSOA.
     ///
-    void putSOA(const isc::datasrc::Zone& zone) const;
+    void addSOA(isc::datasrc::ZoneFinder& finder);
 
+    /// Add NSEC RRs that prove an NXDOMAIN result.
+    ///
+    /// This corresponds to Section 3.1.3.2 of RFC 4035.
+    void addNXDOMAINProof(isc::datasrc::ZoneFinder& finder,
+                          isc::dns::ConstRRsetPtr nsec);
+
+    /// Add NSEC RRs that prove a wildcard answer is the best one.
+    ///
+    /// This corresponds to Section 3.1.3.3 of RFC 4035.
+    void addWildcardProof(isc::datasrc::ZoneFinder& finder);
+
+    /// \brief Adds one NSEC RR proved no matched QNAME,one NSEC RR proved no
+    /// matched <QNAME,QTYPE> through wildcard extension.
+    ///
+    /// Add NSEC RRs that prove an WILDCARD_NXRRSET result.
+    /// This corresponds to Section 3.1.3.4 of RFC 4035.
+    /// \param finder The ZoneFinder through which the authority data for the
+    /// query is to be found.
+    /// \param nsec The RRset (NSEC RR) which proved that there is no matched 
+    /// <QNAME,QTTYPE>.
+    void addWildcardNXRRSETProof(isc::datasrc::ZoneFinder& finder,
+                                 isc::dns::ConstRRsetPtr nsec);
+    
     /// \brief Look up additional data (i.e., address records for the names
-    /// included in NS or MX records).
+    /// included in NS or MX records) and add them to the additional section.
     ///
     /// Note: Any additional data which has already been provided in the
     /// answer section (i.e., if the original query happend to be for the
@@ -83,12 +104,12 @@ private:
     /// This method may throw a exception because its underlying methods may
     /// throw exceptions.
     ///
-    /// \param zone The Zone wherein the additional data to the query is bo be
-    /// found.
+    /// \param zone The ZoneFinder through which the additional data for the
+    /// query is to be found.
     /// \param rrset The RRset (i.e., NS or MX rrset) which require additional
     /// processing.
-    void getAdditional(const isc::datasrc::Zone& zone,
-                       const isc::dns::RRset& rrset) const;
+    void addAdditional(isc::datasrc::ZoneFinder& zone,
+                       const isc::dns::RRset& rrset);
 
     /// \brief Find address records for a specified name.
     ///
@@ -102,18 +123,19 @@ private:
     /// The glue records must exactly match the name in the NS RDATA, without
     /// CNAME or wildcard processing.
     ///
-    /// \param zone The \c Zone wherein the address records is to be found.
+    /// \param zone The \c ZoneFinder through which the address records is to
+    /// be found.
     /// \param qname The name in rrset RDATA.
     /// \param options The search options.
-    void findAddrs(const isc::datasrc::Zone& zone,
-                   const isc::dns::Name& qname,
-                   const isc::datasrc::Zone::FindOptions options
-                   = isc::datasrc::Zone::FIND_DEFAULT) const;
+    void addAdditionalAddrs(isc::datasrc::ZoneFinder& zone,
+                            const isc::dns::Name& qname,
+                            const isc::datasrc::ZoneFinder::FindOptions options
+                            = isc::datasrc::ZoneFinder::FIND_DEFAULT);
 
-    /// \brief Look up \c Zone's NS and address records for the NS RDATA
-    /// (domain name) for authoritative answer.
+    /// \brief Look up a zone's NS RRset and their address records for an
+    /// authoritative answer, and add them to the additional section.
     ///
-    /// On returning an authoritative answer, insert the \c Zone's NS into the
+    /// On returning an authoritative answer, insert a zone's NS into the
     /// authority section and AAAA/A RRs of each of the NS RDATA into the
     /// additional section.
     ///
@@ -126,25 +148,29 @@ private:
     /// include AAAA/A RRs under a zone cut in additional section. (BIND 9
     /// excludes under-cut RRs; NSD include them.)
     ///
-    /// \param zone The \c Zone wherein the additional data to the query is to
-    /// be found.
-    void getAuthAdditional(const isc::datasrc::Zone& zone) const;
+    /// \param finder The \c ZoneFinder through which the NS and additional
+    /// data for the query are to be found.
+    void addAuthAdditional(isc::datasrc::ZoneFinder& finder);
 
 public:
     /// Constructor from query parameters.
     ///
     /// This constructor never throws an exception.
     ///
-    /// \param memory_datasrc The memory datasource wherein the answer to the query is
+    /// \param datasrc_client The datasource wherein the answer to the query is
     /// to be found.
     /// \param qname The query name
     /// \param qtype The RR type of the query
     /// \param response The response message to store the answer to the query.
-    Query(const isc::datasrc::MemoryDataSrc& memory_datasrc,
+    /// \param dnssec If the answer should include signatures and NSEC/NSEC3 if
+    ///     possible.
+    Query(const isc::datasrc::DataSourceClient& datasrc_client,
           const isc::dns::Name& qname, const isc::dns::RRType& qtype,
-          isc::dns::Message& response) :
-        memory_datasrc_(memory_datasrc), qname_(qname), qtype_(qtype),
-        response_(response)
+          isc::dns::Message& response, bool dnssec = false) :
+        datasrc_client_(datasrc_client), qname_(qname), qtype_(qtype),
+        response_(response), dnssec_(dnssec),
+        dnssec_opt_(dnssec ?  isc::datasrc::ZoneFinder::FIND_DNSSEC :
+                    isc::datasrc::ZoneFinder::FIND_DEFAULT)
     {}
 
     /// Process the query.
@@ -157,7 +183,7 @@ public:
     /// successful search would result in adding a corresponding RRset to
     /// the answer section of the response.
     ///
-    /// If no matching zone is found in the memory datasource, the RCODE of
+    /// If no matching zone is found in the datasource, the RCODE of
     /// SERVFAIL will be set in the response.
     /// <b>Note:</b> this is different from the error code that BIND 9 returns
     /// by default when it's configured as an authoritative-only server (and
@@ -173,7 +199,7 @@ public:
     /// This might throw BadZone or any of its specific subclasses, but that
     /// shouldn't happen in real-life (as BadZone means wrong data, it should
     /// have been rejected upon loading).
-    void process() const;
+    void process();
 
     /// \short Bad zone data encountered.
     ///
@@ -207,11 +233,24 @@ public:
         {}
     };
 
+    /// An invalid result is given when a valid NSEC is expected
+    ///
+    // This can only happen when the underlying data source implementation or
+    /// the zone is broken.  By throwing an exception we treat such cases
+    /// as SERVFAIL.
+    struct BadNSEC : public BadZone {
+        BadNSEC(const char* file, size_t line, const char* what) :
+            BadZone(file, line, what)
+        {}
+    };
+
 private:
-    const isc::datasrc::MemoryDataSrc& memory_datasrc_;
+    const isc::datasrc::DataSourceClient& datasrc_client_;
     const isc::dns::Name& qname_;
     const isc::dns::RRType& qtype_;
     isc::dns::Message& response_;
+    const bool dnssec_;
+    const isc::datasrc::ZoneFinder::FindOptions dnssec_opt_;
 };
 
 }
diff --git a/src/bin/auth/spec_config.h.pre.in b/src/bin/auth/spec_config.h.pre.in
index 52581dd..1b1df19 100644
--- a/src/bin/auth/spec_config.h.pre.in
+++ b/src/bin/auth/spec_config.h.pre.in
@@ -1,16 +1,16 @@
-// Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#define AUTH_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/auth.spec"
-#define UNIX_SOCKET_FILE "@@LOCALSTATEDIR@@/auth_xfrout_conn"
+// Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define AUTH_SPECFILE_LOCATION "@prefix@/share/@PACKAGE@/auth.spec"
+#define UNIX_SOCKET_FILE "@@LOCALSTATEDIR@@/@PACKAGE@/auth_xfrout_conn"
diff --git a/src/bin/auth/statistics.cc b/src/bin/auth/statistics.cc
index 1cfa447..0e49fd5 100644
--- a/src/bin/auth/statistics.cc
+++ b/src/bin/auth/statistics.cc
@@ -44,12 +44,15 @@ public:
              const AuthCounters::PerZoneCounterType type);
     bool submitStatistics() const;
     void setStatisticsSession(isc::cc::AbstractSession* statistics_session);
+    void registerStatisticsValidator
+    (AuthCounters::validator_type validator);
     // Currently for testing purpose only
     uint64_t getCounter(const AuthCounters::ServerCounterType type) const;
 private:
     Counter server_counter_;
     CounterDictionary per_zone_counter_;
     isc::cc::AbstractSession* statistics_session_;
+    AuthCounters::validator_type validator_;
 };
 
 AuthCountersImpl::AuthCountersImpl() :
@@ -86,16 +89,25 @@ AuthCountersImpl::submitStatistics() const {
     }
     std::stringstream statistics_string;
     statistics_string << "{\"command\": [\"set\","
-                      <<   "{ \"stats_data\": "
-                      <<     "{ \"auth.queries.udp\": "
+                      <<   "{ \"owner\": \"Auth\","
+                      <<   "  \"data\":"
+                      <<     "{ \"queries.udp\": "
                       <<     server_counter_.get(AuthCounters::SERVER_UDP_QUERY)
-                      <<     ", \"auth.queries.tcp\": "
+                      <<     ", \"queries.tcp\": "
                       <<     server_counter_.get(AuthCounters::SERVER_TCP_QUERY)
                       <<   " }"
                       <<   "}"
                       << "]}";
     isc::data::ConstElementPtr statistics_element =
         isc::data::Element::fromJSON(statistics_string);
+    // validate the statistics data before send
+    if (validator_) {
+        if (!validator_(
+                statistics_element->get("command")->get(1)->get("data"))) {
+            LOG_ERROR(auth_logger, AUTH_INVALID_STATISTICS_DATA);
+            return (false);
+        }
+    }
     try {
         // group_{send,recv}msg() can throw an exception when encountering
         // an error, and group_recvmsg() will throw an exception on timeout.
@@ -124,6 +136,13 @@ AuthCountersImpl::setStatisticsSession
     statistics_session_ = statistics_session;
 }
 
+void
+AuthCountersImpl::registerStatisticsValidator
+    (AuthCounters::validator_type validator)
+{
+    validator_ = validator;
+}
+
 // Currently for testing purpose only
 uint64_t
 AuthCountersImpl::getCounter(const AuthCounters::ServerCounterType type) const {
@@ -156,3 +175,10 @@ uint64_t
 AuthCounters::getCounter(const AuthCounters::ServerCounterType type) const {
     return (impl_->getCounter(type));
 }
+
+void
+AuthCounters::registerStatisticsValidator
+    (AuthCounters::validator_type validator) const
+{
+    return (impl_->registerStatisticsValidator(validator));
+}
diff --git a/src/bin/auth/statistics.h b/src/bin/auth/statistics.h
index 3764ac9..280b4a5 100644
--- a/src/bin/auth/statistics.h
+++ b/src/bin/auth/statistics.h
@@ -83,7 +83,7 @@ public:
     ///
     /// \throw std::out_of_range \a type is unknown.
     ///
-    /// usage: counter.inc(CounterType::COUNTER_UDP_QUERY);
+    /// usage: counter.inc(AuthCounters::SERVER_UDP_QUERY);
     /// 
     void inc(const ServerCounterType type);
 
@@ -137,6 +137,26 @@ public:
     /// \return the value of the counter specified by \a type.
     ///
     uint64_t getCounter(const AuthCounters::ServerCounterType type) const;
+
+    /// \brief A type of validation function for the specification in
+    /// isc::config::ModuleSpec.
+    ///
+    /// This type might be useful for not only statistics
+    /// specificatoin but also for config_data specification and for
+    /// commnad.
+    ///
+    typedef boost::function<bool(const isc::data::ConstElementPtr&)>
+    validator_type;
+
+    /// \brief Register a function type of the statistics validation
+    /// function for AuthCounters.
+    ///
+    /// This method never throws an exception.
+    ///
+    /// \param validator A function type of the validation of
+    /// statistics specification.
+    ///
+    void registerStatisticsValidator(AuthCounters::validator_type validator) const;
 };
 
 #endif // __STATISTICS_H
diff --git a/src/bin/auth/tests/Makefile.am b/src/bin/auth/tests/Makefile.am
index e58dd53..e9527a4 100644
--- a/src/bin/auth/tests/Makefile.am
+++ b/src/bin/auth/tests/Makefile.am
@@ -37,6 +37,13 @@ run_unittests_SOURCES += query_unittest.cc
 run_unittests_SOURCES += change_user_unittest.cc
 run_unittests_SOURCES += statistics_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
+# This is a temporary workaround for #1206, where the InMemoryClient has been
+# moved to an ldopened library. We could add that library to LDADD, but that
+# is nonportable. When #1207 is done this becomes moot anyway, and the
+# specific workaround is not needed anymore, so we can then remove this
+# line again.
+run_unittests_SOURCES += ${top_srcdir}/src/lib/datasrc/memory_datasrc.cc
+
 
 nodist_run_unittests_SOURCES = ../auth_messages.h ../auth_messages.cc
 
@@ -47,6 +54,7 @@ run_unittests_LDADD += $(SQLITE_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/datasrc/libdatasrc.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 560db36..d90006a 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -229,7 +229,8 @@ TEST_F(AuthSrvTest, AXFROverUDP) {
 TEST_F(AuthSrvTest, AXFRSuccess) {
     EXPECT_FALSE(xfrout.isConnected());
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     // On success, the AXFR query has been passed to a separate process,
     // so we shouldn't have to respond.
@@ -245,7 +246,8 @@ TEST_F(AuthSrvTest, TSIGSigned) {
     const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Run the message through the server
@@ -278,7 +280,8 @@ TEST_F(AuthSrvTest, TSIGSignedBadKey) {
     TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
@@ -309,7 +312,8 @@ TEST_F(AuthSrvTest, TSIGBadSig) {
     TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
@@ -375,7 +379,8 @@ TEST_F(AuthSrvTest, AXFRConnectFail) {
     EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
     xfrout.disableConnect();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -388,7 +393,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
     // first send a valid query, making the connection with the xfr process
     // open.
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(xfrout.isConnected());
@@ -397,7 +403,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
     parse_message->clear(Message::PARSE);
     response_obuffer->clear();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -414,7 +421,66 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
     xfrout.disableSend();
     xfrout.disableDisconnect();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    EXPECT_THROW(server.processMessage(*io_message, parse_message,
+                                       response_obuffer, &dnsserv),
+                 XfroutError);
+    EXPECT_TRUE(xfrout.isConnected());
+    // XXX: we need to re-enable disconnect.  otherwise an exception would be
+    // thrown via the destructor of the server.
+    xfrout.enableDisconnect();
+}
+
+TEST_F(AuthSrvTest, IXFRConnectFail) {
+    EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
+    xfrout.disableConnect();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+                opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+    EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRSendFail) {
+    // first send a valid query, making the connection with the xfr process
+    // open.
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(xfrout.isConnected());
+
+    xfrout.disableSend();
+    parse_message->clear(Message::PARSE);
+    response_obuffer->clear();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+                opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+
+    // The connection should have been closed due to the send failure.
+    EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRDisconnectFail) {
+    // In our usage disconnect() shouldn't fail.  So we'll see the exception
+    // should it be thrown.
+    xfrout.disableSend();
+    xfrout.disableDisconnect();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     EXPECT_THROW(server.processMessage(*io_message, parse_message,
                                        response_obuffer, &dnsserv),
@@ -426,8 +492,9 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
 }
 
 TEST_F(AuthSrvTest, notify) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -458,8 +525,9 @@ TEST_F(AuthSrvTest, notify) {
 
 TEST_F(AuthSrvTest, notifyForCHClass) {
     // Same as the previous test, but for the CH RRClass.
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::CH(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::CH(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -487,8 +555,9 @@ TEST_F(AuthSrvTest, notifyEmptyQuestion) {
 }
 
 TEST_F(AuthSrvTest, notifyMultiQuestions) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     // add one more SOA question
     request_message.addQuestion(Question(Name("example.com"), RRClass::IN(),
                                          RRType::SOA()));
@@ -501,8 +570,9 @@ TEST_F(AuthSrvTest, notifyMultiQuestions) {
 }
 
 TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::NS());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::NS());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -513,8 +583,9 @@ TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
 
 TEST_F(AuthSrvTest, notifyWithoutAA) {
     // implicitly leave the AA bit off.  our implementation will accept it.
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -523,8 +594,9 @@ TEST_F(AuthSrvTest, notifyWithoutAA) {
 }
 
 TEST_F(AuthSrvTest, notifyWithErrorRcode) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     request_message.setRcode(Rcode::SERVFAIL());
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -537,8 +609,9 @@ TEST_F(AuthSrvTest, notifyWithErrorRcode) {
 TEST_F(AuthSrvTest, notifyWithoutSession) {
     server.setXfrinSession(NULL);
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
 
@@ -551,8 +624,9 @@ TEST_F(AuthSrvTest, notifyWithoutSession) {
 TEST_F(AuthSrvTest, notifySendFail) {
     notify_session.disableSend();
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
 
@@ -563,8 +637,9 @@ TEST_F(AuthSrvTest, notifySendFail) {
 TEST_F(AuthSrvTest, notifyReceiveFail) {
     notify_session.disableReceive();
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -574,8 +649,9 @@ TEST_F(AuthSrvTest, notifyReceiveFail) {
 TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
     notify_session.setMessage(Element::fromJSON("{\"foo\": 1}"));
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -586,8 +662,9 @@ TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
     notify_session.setMessage(
         Element::fromJSON("{\"result\": [1, \"FAIL\"]}"));
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -651,17 +728,17 @@ TEST_F(AuthSrvTest, updateConfigFail) {
                 QR_FLAG | AA_FLAG, 1, 1, 1, 0);
 }
 
-TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, updateWithInMemoryClient) {
     // Test configuring memory data source.  Detailed test cases are covered
     // in the configuration tests.  We only check the AuthSrv interface here.
 
     // By default memory data source isn't enabled
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
     updateConfig(&server,
                  "{\"datasources\": [{\"type\": \"memory\"}]}", true);
     // after successful configuration, we should have one (with empty zoneset).
-    ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
-    EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+    EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
 
     // The memory data source is empty, should return REFUSED rcode.
     createDataFromFile("examplequery_fromWire.wire");
@@ -672,7 +749,7 @@ TEST_F(AuthSrvTest, updateWithMemoryDataSrc) {
                 opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
 }
 
-TEST_F(AuthSrvTest, chQueryWithMemoryDataSrc) {
+TEST_F(AuthSrvTest, chQueryWithInMemoryClient) {
     // Configure memory data source for class IN
     updateConfig(&server, "{\"datasources\": "
                  "[{\"class\": \"IN\", \"type\": \"memory\"}]}", true);
@@ -737,12 +814,28 @@ TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
                          Name("example.com"), RRClass::IN(), RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     // On success, the AXFR query has been passed to a separate process,
-    // so we shouldn't have to respond.
+    // so auth itself shouldn't respond.
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_FALSE(dnsserv.hasAnswer());
     // After processing TCP AXFR query, the counter should be 1.
     EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
 }
 
+// Submit TCP IXFR query and check query counter
+TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
+    // The counter should be initialized to 0.
+    EXPECT_EQ(0, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                         Name("example.com"), RRClass::IN(), RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    // On success, the IXFR query has been passed to a separate process,
+    // so auth itself shouldn't respond.
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_FALSE(dnsserv.hasAnswer());
+    // After processing TCP IXFR query, the counter should be 1.
+    EXPECT_EQ(1, server.getCounter(AuthCounters::SERVER_TCP_QUERY));
+}
+
 // class for queryCounterUnexpected test
 // getProtocol() returns IPPROTO_IP
 class DummyUnknownSocket : public IOSocket {
diff --git a/src/bin/auth/tests/command_unittest.cc b/src/bin/auth/tests/command_unittest.cc
index 2fc8052..8a82367 100644
--- a/src/bin/auth/tests/command_unittest.cc
+++ b/src/bin/auth/tests/command_unittest.cc
@@ -60,7 +60,6 @@ protected:
     MockSession statistics_session;
     MockXfroutClient xfrout;
     AuthSrv server;
-    AuthSrv::ConstMemoryDataSrcPtr memory_datasrc;
     ConstElementPtr result;
     int rcode;
 public:
@@ -110,18 +109,18 @@ TEST_F(AuthCommandTest, shutdown) {
 // zones, and checks the zones are correctly loaded.
 void
 zoneChecks(AuthSrv& server) {
-    EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test1.example")).zone->
+    EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test1.example")).zone_finder->
               find(Name("ns.test1.example"), RRType::A()).code);
-    EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test1.example")).zone->
+    EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test1.example")).zone_finder->
               find(Name("ns.test1.example"), RRType::AAAA()).code);
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test2.example")).zone->
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test2.example")).zone_finder->
               find(Name("ns.test2.example"), RRType::A()).code);
-    EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test2.example")).zone->
+    EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test2.example")).zone_finder->
               find(Name("ns.test2.example"), RRType::AAAA()).code);
 }
 
@@ -147,21 +146,21 @@ configureZones(AuthSrv& server) {
 
 void
 newZoneChecks(AuthSrv& server) {
-    EXPECT_TRUE(server.getMemoryDataSrc(RRClass::IN()));
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test1.example")).zone->
+    EXPECT_TRUE(server.getInMemoryClient(RRClass::IN()));
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test1.example")).zone_finder->
               find(Name("ns.test1.example"), RRType::A()).code);
     // now test1.example should have ns/AAAA
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test1.example")).zone->
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test1.example")).zone_finder->
               find(Name("ns.test1.example"), RRType::AAAA()).code);
 
     // test2.example shouldn't change
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test2.example")).zone->
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test2.example")).zone_finder->
               find(Name("ns.test2.example"), RRType::A()).code);
-    EXPECT_EQ(Zone::NXRRSET, server.getMemoryDataSrc(RRClass::IN())->
-              findZone(Name("ns.test2.example")).zone->
+    EXPECT_EQ(ZoneFinder::NXRRSET, server.getInMemoryClient(RRClass::IN())->
+              findZone(Name("ns.test2.example")).zone_finder->
               find(Name("ns.test2.example"), RRType::AAAA()).code);
 }
 
diff --git a/src/bin/auth/tests/config_unittest.cc b/src/bin/auth/tests/config_unittest.cc
index 0890c55..dadb0ee 100644
--- a/src/bin/auth/tests/config_unittest.cc
+++ b/src/bin/auth/tests/config_unittest.cc
@@ -57,12 +57,12 @@ protected:
 
 TEST_F(AuthConfigTest, datasourceConfig) {
     // By default, we don't have any in-memory data source.
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
     configureAuthServer(server, Element::fromJSON(
                             "{\"datasources\": [{\"type\": \"memory\"}]}"));
     // after successful configuration, we should have one (with empty zoneset).
-    ASSERT_NE(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
-    EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    ASSERT_NE(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
+    EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
 }
 
 TEST_F(AuthConfigTest, databaseConfig) {
@@ -82,7 +82,7 @@ TEST_F(AuthConfigTest, versionConfig) {
 }
 
 TEST_F(AuthConfigTest, exceptionGuarantee) {
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
     // This configuration contains an invalid item, which will trigger
     // an exception.
     EXPECT_THROW(configureAuthServer(
@@ -92,7 +92,7 @@ TEST_F(AuthConfigTest, exceptionGuarantee) {
                          " \"no_such_config_var\": 1}")),
                  AuthConfigError);
     // The server state shouldn't change
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
 }
 
 TEST_F(AuthConfigTest, exceptionConversion) {
@@ -154,22 +154,22 @@ protected:
 TEST_F(MemoryDatasrcConfigTest, addZeroDataSrc) {
     parser->build(Element::fromJSON("[]"));
     parser->commit();
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
 }
 
 TEST_F(MemoryDatasrcConfigTest, addEmpty) {
     // By default, we don't have any in-memory data source.
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
     parser->build(Element::fromJSON("[{\"type\": \"memory\"}]"));
     parser->commit();
-    EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
 }
 
 TEST_F(MemoryDatasrcConfigTest, addZeroZone) {
     parser->build(Element::fromJSON("[{\"type\": \"memory\","
                                     "  \"zones\": []}]"));
     parser->commit();
-    EXPECT_EQ(0, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(0, server.getInMemoryClient(rrclass)->getZoneCount());
 }
 
 TEST_F(MemoryDatasrcConfigTest, addOneZone) {
@@ -179,10 +179,10 @@ TEST_F(MemoryDatasrcConfigTest, addOneZone) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
     // Check it actually loaded something
-    EXPECT_EQ(Zone::SUCCESS, server.getMemoryDataSrc(rrclass)->findZone(
-        Name("ns.example.com.")).zone->find(Name("ns.example.com."),
+    EXPECT_EQ(ZoneFinder::SUCCESS, server.getInMemoryClient(rrclass)->findZone(
+        Name("ns.example.com.")).zone_finder->find(Name("ns.example.com."),
         RRType::A()).code);
 }
 
@@ -199,7 +199,7 @@ TEST_F(MemoryDatasrcConfigTest, addMultiZones) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.net.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(3, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(3, server.getInMemoryClient(rrclass)->getZoneCount());
 }
 
 TEST_F(MemoryDatasrcConfigTest, replace) {
@@ -209,9 +209,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
     EXPECT_EQ(isc::datasrc::result::SUCCESS,
-              server.getMemoryDataSrc(rrclass)->findZone(
+              server.getInMemoryClient(rrclass)->findZone(
                   Name("example.com")).code);
 
     // create a new parser, and install a new set of configuration.  It
@@ -227,9 +227,9 @@ TEST_F(MemoryDatasrcConfigTest, replace) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.net.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(2, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(2, server.getInMemoryClient(rrclass)->getZoneCount());
     EXPECT_EQ(isc::datasrc::result::NOTFOUND,
-              server.getMemoryDataSrc(rrclass)->findZone(
+              server.getInMemoryClient(rrclass)->findZone(
                   Name("example.com")).code);
 }
 
@@ -241,9 +241,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
     EXPECT_EQ(isc::datasrc::result::SUCCESS,
-              server.getMemoryDataSrc(rrclass)->findZone(
+              server.getInMemoryClient(rrclass)->findZone(
                   Name("example.com")).code);
 
     // create a new parser, and try to load something. It will throw,
@@ -262,9 +262,9 @@ TEST_F(MemoryDatasrcConfigTest, exception) {
     // commit it
 
     // The original should be untouched
-    EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
     EXPECT_EQ(isc::datasrc::result::SUCCESS,
-              server.getMemoryDataSrc(rrclass)->findZone(
+              server.getInMemoryClient(rrclass)->findZone(
                   Name("example.com")).code);
 }
 
@@ -275,13 +275,13 @@ TEST_F(MemoryDatasrcConfigTest, remove) {
                       "               \"file\": \"" TEST_DATA_DIR
                       "/example.zone\"}]}]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(1, server.getMemoryDataSrc(rrclass)->getZoneCount());
+    EXPECT_EQ(1, server.getInMemoryClient(rrclass)->getZoneCount());
 
     delete parser;
     parser = createAuthConfigParser(server, "datasources"); 
     EXPECT_NO_THROW(parser->build(Element::fromJSON("[]")));
     EXPECT_NO_THROW(parser->commit());
-    EXPECT_EQ(AuthSrv::MemoryDataSrcPtr(), server.getMemoryDataSrc(rrclass));
+    EXPECT_EQ(AuthSrv::InMemoryClientPtr(), server.getInMemoryClient(rrclass));
 }
 
 TEST_F(MemoryDatasrcConfigTest, adDuplicateZones) {
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index c68b672..14067ab 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -17,6 +17,7 @@
 #include <map>
 
 #include <boost/bind.hpp>
+#include <boost/scoped_ptr.hpp>
 
 #include <dns/masterload.h>
 #include <dns/message.h>
@@ -91,11 +92,82 @@ const char* const other_zone_rrs =
     "cnamemailer.example.com. 3600 IN CNAME www.example.com.\n"
     "cnamemx.example.com. 3600 IN MX 10 cnamemailer.example.com.\n"
     "mx.delegation.example.com. 3600 IN A 192.0.2.100\n";
+// Wildcards
+const char* const wild_txt = "*.wild.example.com. 3600 IN A 192.0.2.7\n";
+const char* const nsec_wild_txt =
+    "*.wild.example.com. 3600 IN NSEC www.example.com. A NSEC RRSIG\n";
+const char* const cnamewild_txt =
+    "*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
+const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
+    "3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
+// Wildcard_nxrrset
+const char* const wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN A 192.0.2.9\n";
+const char* const nsec_wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN NSEC www.uwild.example.com. A NSEC RRSIG\n";
+const char* const wild_txt_next =
+    "www.uwild.example.com. 3600 IN A 192.0.2.11\n";
+const char* const nsec_wild_txt_next =
+    "www.uwild.example.com. 3600 IN NSEC *.wild.example.com. A NSEC RRSIG\n";
+// Wildcard empty
+const char* const empty_txt = "b.*.t.example.com. 3600 IN A 192.0.2.13\n";
+const char* const nsec_empty_txt =
+    "b.*.t.example.com. 3600 IN NSEC *.uwild.example.com. A NSEC RRSIG\n";
+const char* const empty_prev_txt = "t.example.com. 3600 IN A 192.0.2.15\n";
+const char* const nsec_empty_prev_txt =
+    "t.example.com. 3600 IN NSEC b.*.t.example.com. A NSEC RRSIG\n";
+// Used in NXDOMAIN proof test.  We are going to test some unusual case where
+// the best possible wildcard is below the "next domain" of the NSEC RR that
+// proves the NXDOMAIN, i.e.,
+// mx.example.com. (exist)
+// (.no.example.com. (qname, NXDOMAIN)
+// ).no.example.com. (exist)
+// *.no.example.com. (best possible wildcard, not exist)
+const char* const no_txt =
+    ").no.example.com. 3600 IN AAAA 2001:db8::53\n";
+// NSEC records.
+const char* const nsec_apex_txt =
+    "example.com. 3600 IN NSEC cname.example.com. NS SOA NSEC RRSIG\n";
+const char* const nsec_mx_txt =
+    "mx.example.com. 3600 IN NSEC ).no.example.com. MX NSEC RRSIG\n";
+const char* const nsec_no_txt =
+    ").no.example.com. 3600 IN NSEC nz.no.example.com. AAAA NSEC RRSIG\n";
+// We'll also test the case where a single NSEC proves both NXDOMAIN and the
+// non existence of wildcard.  The following records will be used for that
+// test.
+// ).no.example.com. (exist, whose NSEC proves everything)
+// *.no.example.com. (best possible wildcard, not exist)
+// nx.no.example.com. (NXDOMAIN)
+// nz.no.example.com. (exist)
+const char* const nz_txt =
+    "nz.no.example.com. 3600 IN AAAA 2001:db8::5300\n";
+const char* const nsec_nz_txt =
+    "nz.no.example.com. 3600 IN NSEC noglue.example.com. AAAA NSEC RRSIG\n";
+const char* const nsec_nxdomain_txt =
+    "noglue.example.com. 3600 IN NSEC nonsec.example.com. A\n";
+
+// NSEC for the normal NXRRSET case
+const char* const nsec_www_txt =
+    "www.example.com. 3600 IN NSEC example.com. A NSEC RRSIG\n";
+
+// Authoritative data without NSEC
+const char* const nonsec_a_txt = "nonsec.example.com. 3600 IN A 192.0.2.0\n";
+
+// A helper function that generates a textual representation of RRSIG RDATA
+// for the given covered type.  The resulting RRSIG may not necessarily make
+// sense in terms of the DNSSEC protocol, but for our testing purposes it's
+// okay.
+string
+getCommonRRSIGText(const string& type) {
+    return (type +
+            string(" 5 3 3600 20000101000000 20000201000000 12345 "
+                   "example.com. FAKEFAKEFAKE"));
+}
 
-// This is a mock Zone class for testing.
-// It is a derived class of Zone for the convenient of tests.
+// This is a mock Zone Finder class for testing.
+// It is a derived class of ZoneFinder for the convenient of tests.
 // Its find() method emulates the common behavior of protocol compliant
-// zone classes, but simplifies some minor cases and also supports broken
+// ZoneFinder classes, but simplifies some minor cases and also supports broken
 // behavior.
 // For simplicity, most names are assumed to be "in zone"; there's only
 // one zone cut at the point of name "delegation.example.com".
@@ -103,31 +175,44 @@ const char* const other_zone_rrs =
 // will result in DNAME.
 // This mock zone doesn't handle empty non terminal nodes (if we need to test
 // such cases find() should have specialized code for it).
-class MockZone : public Zone {
+class MockZoneFinder : public ZoneFinder {
 public:
-    MockZone() :
+    MockZoneFinder() :
         origin_(Name("example.com")),
         delegation_name_("delegation.example.com"),
         dname_name_("dname.example.com"),
         has_SOA_(true),
         has_apex_NS_(true),
-        rrclass_(RRClass::IN())
+        rrclass_(RRClass::IN()),
+        include_rrsig_anyway_(false),
+        nsec_name_(origin_)
     {
         stringstream zone_stream;
         zone_stream << soa_txt << zone_ns_txt << ns_addrs_txt <<
             delegation_txt << mx_txt << www_a_txt << cname_txt <<
             cname_nxdom_txt << cname_out_txt << dname_txt << dname_a_txt <<
-            other_zone_rrs;
+            other_zone_rrs << no_txt << nz_txt <<
+            nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
+            nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
+            wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt <<
+            wild_txt_nxrrset << nsec_wild_txt_nxrrset << wild_txt_next <<
+            nsec_wild_txt_next << empty_txt << nsec_empty_txt <<
+            empty_prev_txt << nsec_empty_prev_txt;
 
         masterLoad(zone_stream, origin_, rrclass_,
-                   boost::bind(&MockZone::loadRRset, this, _1));
+                   boost::bind(&MockZoneFinder::loadRRset, this, _1));
+
+        empty_nsec_rrset_ = ConstRRsetPtr(new RRset(Name::ROOT_NAME(),
+                                                    RRClass::IN(),
+                                                    RRType::NSEC(),
+                                                    RRTTL(3600)));
     }
-    virtual const isc::dns::Name& getOrigin() const { return (origin_); }
-    virtual const isc::dns::RRClass& getClass() const { return (rrclass_); }
+    virtual isc::dns::Name getOrigin() const { return (origin_); }
+    virtual isc::dns::RRClass getClass() const { return (rrclass_); }
     virtual FindResult find(const isc::dns::Name& name,
                             const isc::dns::RRType& type,
                             RRsetList* target = NULL,
-                            const FindOptions options = FIND_DEFAULT) const;
+                            const FindOptions options = FIND_DEFAULT);
 
     // If false is passed, it makes the zone broken as if it didn't have the
     // SOA.
@@ -137,11 +222,32 @@ public:
     // the apex NS.
     void setApexNSFlag(bool on) { has_apex_NS_ = on; }
 
+    // Turn this on if you want it to return RRSIGs regardless of FIND_GLUE_OK
+    void setIncludeRRSIGAnyway(bool on) { include_rrsig_anyway_ = on; }
+
+    // Once called, this "faked" result will be returned when NSEC is expected
+    // for the specified query name.
+    void setNSECResult(const Name& nsec_name, Result code,
+                       ConstRRsetPtr rrset)
+    {
+        nsec_name_ = nsec_name;
+        nsec_result_.reset(new ZoneFinder::FindResult(code, rrset));
+    }
+
+    Name findPreviousName(const Name&) const {
+        isc_throw(isc::NotImplemented, "Mock doesn't support previous name");
+    }
+
+public:
+    // We allow the tests to use these for convenience
+    ConstRRsetPtr delegation_rrset_;
+    ConstRRsetPtr empty_nsec_rrset_;
+
 private:
     typedef map<RRType, ConstRRsetPtr> RRsetStore;
     typedef map<Name, RRsetStore> Domains;
     Domains domains_;
-    void loadRRset(ConstRRsetPtr rrset) {
+    void loadRRset(RRsetPtr rrset) {
         domains_[rrset->getName()][rrset->getType()] = rrset;
         if (rrset->getName() == delegation_name_ &&
             rrset->getType() == RRType::NS()) {
@@ -149,6 +255,20 @@ private:
         } else if (rrset->getName() == dname_name_ &&
             rrset->getType() == RRType::DNAME()) {
             dname_rrset_ = rrset;
+        // Add some signatures
+        } else if (rrset->getName() == Name("example.com.") &&
+                   rrset->getType() == RRType::NS()) {
+            // For NS, we only have RRSIG for the origin name.
+            rrset->addRRsig(RdataPtr(new generic::RRSIG(
+                                         getCommonRRSIGText("NS"))));
+        } else {
+            // For others generate RRSIG unconditionally.  Technically this
+            // is wrong because we shouldn't have it for names under a zone
+            // cut.  But in our tests that doesn't matter, so we add them
+            // just for simplicity.
+            rrset->addRRsig(RdataPtr(new generic::RRSIG(
+                                         getCommonRRSIGText(rrset->getType().
+                                                            toText()))));
         }
     }
 
@@ -158,14 +278,35 @@ private:
     const Name dname_name_;
     bool has_SOA_;
     bool has_apex_NS_;
-    ConstRRsetPtr delegation_rrset_;
     ConstRRsetPtr dname_rrset_;
     const RRClass rrclass_;
+    bool include_rrsig_anyway_;
+    // The following two will be used for faked NSEC cases
+    Name nsec_name_;
+    boost::scoped_ptr<ZoneFinder::FindResult> nsec_result_;
 };
 
-Zone::FindResult
-MockZone::find(const Name& name, const RRType& type,
-               RRsetList* target, const FindOptions options) const
+// A helper function that generates a new RRset based on "wild_rrset",
+// replacing its owner name with 'real_name'.
+ConstRRsetPtr
+substituteWild(const RRset& wild_rrset, const Name& real_name) {
+    RRsetPtr rrset(new RRset(real_name, wild_rrset.getClass(),
+                             wild_rrset.getType(), wild_rrset.getTTL()));
+    // For simplicity we only consider the case with one RDATA (for now)
+    rrset->addRdata(wild_rrset.getRdataIterator()->getCurrent());
+    ConstRRsetPtr wild_sig = wild_rrset.getRRsig();
+    if (wild_sig) {
+        RRsetPtr sig(new RRset(real_name, wild_sig->getClass(),
+                               wild_sig->getType(), wild_sig->getTTL()));
+        sig->addRdata(wild_sig->getRdataIterator()->getCurrent());
+        rrset->addRRsig(sig);
+    }
+    return (rrset);
+}
+
+ZoneFinder::FindResult
+MockZoneFinder::find(const Name& name, const RRType& type,
+                     RRsetList* target, const FindOptions options)
 {
     // Emulating a broken zone: mandatory apex RRs are missing if specifically
     // configured so (which are rare cases).
@@ -195,7 +336,26 @@ MockZone::find(const Name& name, const RRType& type,
         RRsetStore::const_iterator found_rrset =
             found_domain->second.find(type);
         if (found_rrset != found_domain->second.end()) {
-            return (FindResult(SUCCESS, found_rrset->second));
+            ConstRRsetPtr rrset;
+            // Strip whatever signature there is in case DNSSEC is not required
+            // Just to make sure the Query asks for it when it is needed
+            if (options & ZoneFinder::FIND_DNSSEC ||
+                include_rrsig_anyway_ ||
+                !found_rrset->second->getRRsig()) {
+                rrset = found_rrset->second;
+            } else {
+                RRsetPtr noconst(new RRset(found_rrset->second->getName(),
+                                           found_rrset->second->getClass(),
+                                           found_rrset->second->getType(),
+                                           found_rrset->second->getTTL()));
+                for (RdataIteratorPtr
+                     i(found_rrset->second->getRdataIterator());
+                     !i->isLast(); i->next()) {
+                    noconst->addRdata(i->getCurrent());
+                }
+                rrset = noconst;
+            }
+            return (FindResult(SUCCESS, rrset));
         }
 
         // If not found but we have a target, fill it with all RRsets here
@@ -216,10 +376,126 @@ MockZone::find(const Name& name, const RRType& type,
         }
 
         // Otherwise it's NXRRSET case.
+        if ((options & FIND_DNSSEC) != 0) {
+            found_rrset = found_domain->second.find(RRType::NSEC());
+            if (found_rrset != found_domain->second.end()) {
+                return (FindResult(NXRRSET, found_rrset->second));
+            }
+        }
+        return (FindResult(NXRRSET, RRsetPtr()));
+    }
+
+    // query name isn't found in our domains.
+    // We first check if the query name is an empty non terminal name
+    // of the zone by naive linear search.
+    Domains::const_iterator domain;
+    for (domain = domains_.begin(); domain != domains_.end(); ++domain) {
+        if (name.compare((*domain).first).getRelation() ==
+            NameComparisonResult::SUPERDOMAIN) {
+            break;
+        }
+    }
+    if (domain != domains_.end()) {
+        // The query name is in an empty non terminal node followed by 'domain'
+        // (for simplicity we ignore the pathological case of 'domain' is
+        // the origin of the zone)
+        --domain;               // reset domain to the "previous name"
+        if ((options & FIND_DNSSEC) != 0) {
+            RRsetStore::const_iterator found_rrset =
+                (*domain).second.find(RRType::NSEC());
+            if (found_rrset != (*domain).second.end()) {
+                return (FindResult(NXRRSET, found_rrset->second));
+            }
+        }
         return (FindResult(NXRRSET, RRsetPtr()));
     }
 
-    // query name isn't found in our domains.  returns NXDOMAIN.
+    // Another possibility is wildcard.  For simplicity we only check
+    // hardcoded specific cases, ignoring other details such as canceling
+    // due to the existence of closer name.
+    if ((options & NO_WILDCARD) == 0) {
+        const Name wild_suffix(name.split(1));
+        // Unit Tests use those domains for Wildcard test.
+        if (name.equals(Name("www.wild.example.com"))||
+           name.equals(Name("www1.uwild.example.com"))||
+           name.equals(Name("a.t.example.com"))) {
+            if (name.compare(wild_suffix).getRelation() ==
+                NameComparisonResult::SUBDOMAIN) {
+                domain = domains_.find(Name("*").concatenate(wild_suffix));
+                // Matched the QNAME
+                if (domain != domains_.end()) {
+                   RRsetStore::const_iterator found_rrset =
+                       domain->second.find(type);
+                   // Matched the QTYPE
+                   if(found_rrset != domain->second.end()) {
+                    return (FindResult(WILDCARD,
+                            substituteWild(*found_rrset->second, name)));
+                   } else {
+                   // No matched QTYPE, this case is for WILDCARD_NXRRSET
+                     found_rrset = domain->second.find(RRType::NSEC());
+                     assert(found_rrset != domain->second.end());
+                     Name newName = Name("*").concatenate(wild_suffix);
+                     return (FindResult(WILDCARD_NXRRSET,
+                           substituteWild(*found_rrset->second,newName)));
+                   }
+                 } else {
+                    // This is empty non terminal name case on wildcard.
+                    Name emptyName = Name("*").concatenate(wild_suffix);
+                    for (Domains::reverse_iterator it = domains_.rbegin();
+                        it != domains_.rend();
+                        ++it) {
+                            RRsetStore::const_iterator nsec_it;
+                            if ((*it).first < emptyName &&
+                            (nsec_it = (*it).second.find(RRType::NSEC()))
+                            != (*it).second.end()) {
+                                return (FindResult(WILDCARD_NXRRSET,
+                                                   (*nsec_it).second));
+                            }
+                        }
+                }
+                return (FindResult(WILDCARD_NXRRSET,RRsetPtr()));
+             }
+        }
+        const Name cnamewild_suffix("cnamewild.example.com");
+        if (name.compare(cnamewild_suffix).getRelation() ==
+            NameComparisonResult::SUBDOMAIN) {
+            domain = domains_.find(Name("*").concatenate(cnamewild_suffix));
+            assert(domain != domains_.end());
+            RRsetStore::const_iterator found_rrset =
+                domain->second.find(RRType::CNAME());
+            assert(found_rrset != domain->second.end());
+            return (FindResult(WILDCARD_CNAME,
+                               substituteWild(*found_rrset->second, name)));
+        }
+    }
+
+    // This is an NXDOMAIN case.
+    // If we need DNSSEC proof, find the "previous name" that has an NSEC RR
+    // and return NXDOMAIN with the found NSEC.  Otherwise, just return the
+    // NXDOMAIN code and NULL.  If DNSSEC proof is requested but no NSEC is
+    // found, we return NULL, too.  (For simplicity under the test conditions
+    // we don't care about pathological cases such as the name is "smaller"
+    // than the origin)
+    if ((options & FIND_DNSSEC) != 0) {
+        // Emulate a broken DataSourceClient for some special names.
+        if (nsec_result_ && nsec_name_ == name) {
+            return (*nsec_result_);
+        }
+
+        // Normal case
+        // XXX: some older g++ complains about operator!= if we use
+        // const_reverse_iterator
+        for (Domains::reverse_iterator it = domains_.rbegin();
+             it != domains_.rend();
+             ++it) {
+            RRsetStore::const_iterator nsec_it;
+            if ((*it).first < name &&
+                (nsec_it = (*it).second.find(RRType::NSEC()))
+                != (*it).second.end()) {
+                return (FindResult(NXDOMAIN, (*nsec_it).second));
+            }
+        }
+    }
     return (FindResult(NXDOMAIN, RRsetPtr()));
 }
 
@@ -233,11 +509,15 @@ protected:
         response.setRcode(Rcode::NOERROR());
         response.setOpcode(Opcode::QUERY());
         // create and add a matching zone.
-        mock_zone = new MockZone();
-        memory_datasrc.addZone(ZonePtr(mock_zone));
+        mock_finder = new MockZoneFinder();
+        memory_client.addZone(ZoneFinderPtr(mock_finder));
     }
-    MockZone* mock_zone;
-    MemoryDataSrc memory_datasrc;
+    MockZoneFinder* mock_finder;
+    // We use InMemoryClient here. We could have some kind of mock client
+    // here, but historically, the Query supported only InMemoryClient
+    // (originally named MemoryDataSrc) and was tested with it, so we keep
+    // it like this for now.
+    InMemoryClient memory_client;
     const Name qname;
     const RRClass qclass;
     const RRType qtype;
@@ -286,24 +566,76 @@ responseCheck(Message& response, const isc::dns::Rcode& rcode,
 TEST_F(QueryTest, noZone) {
     // There's no zone in the memory datasource.  So the response should have
     // REFUSED.
-    MemoryDataSrc empty_memory_datasrc;
-    Query nozone_query(empty_memory_datasrc, qname, qtype, response);
+    InMemoryClient empty_memory_client;
+    Query nozone_query(empty_memory_client, qname, qtype, response);
     EXPECT_NO_THROW(nozone_query.process());
     EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
 }
 
 TEST_F(QueryTest, exactMatch) {
-    Query query(memory_datasrc, qname, qtype, response);
+    Query query(memory_client, qname, qtype, response);
     EXPECT_NO_THROW(query.process());
     // find match rrset
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
                   www_a_txt, zone_ns_txt, ns_addrs_txt);
 }
 
+TEST_F(QueryTest, exactMatchIgnoreSIG) {
+    // Check that we do not include the RRSIG when not requested even when
+    // we receive it from the data source.
+    mock_finder->setIncludeRRSIGAnyway(true);
+    Query query(memory_client, qname, qtype, response);
+    EXPECT_NO_THROW(query.process());
+    // find match rrset
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
+                  www_a_txt, zone_ns_txt, ns_addrs_txt);
+}
+
+TEST_F(QueryTest, dnssecPositive) {
+    // Just like exactMatch, but the signatures should be included as well
+    Query query(memory_client, qname, qtype, response, true);
+    EXPECT_NO_THROW(query.process());
+    // find match rrset
+    // We can't let responseCheck to check the additional section as well,
+    // it gets confused by the two RRs for glue.delegation.../RRSIG due
+    // to its design and fixing it would be hard. Therefore we simply
+    // check manually this one time.
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 4, 6,
+                  (www_a_txt + std::string("www.example.com. 3600 IN RRSIG "
+                                           "A 5 3 3600 20000101000000 "
+                                           "20000201000000 12345 example.com. "
+                                           "FAKEFAKEFAKE\n")).c_str(),
+                  (zone_ns_txt + std::string("example.com. 3600 IN RRSIG NS 5 "
+                                             "3 3600 20000101000000 "
+                                             "20000201000000 12345 "
+                                             "example.com. FAKEFAKEFAKE\n")).
+                  c_str(), NULL);
+    RRsetIterator iterator(response.beginSection(Message::SECTION_ADDITIONAL));
+    const char* additional[] = {
+        "glue.delegation.example.com. 3600 IN A 192.0.2.153\n",
+        "glue.delegation.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+            "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+        "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n",
+        "glue.delegation.example.com. 3600 IN RRSIG AAAA 5 3 3600 "
+            "20000101000000 20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+        "noglue.example.com. 3600 IN A 192.0.2.53\n",
+        "noglue.example.com. 3600 IN RRSIG A 5 3 3600 20000101000000 "
+            "20000201000000 12345 example.com. FAKEFAKEFAKE\n",
+        NULL
+    };
+    for (const char** rr(additional); *rr != NULL; ++ rr) {
+        ASSERT_FALSE(iterator ==
+                     response.endSection(Message::SECTION_ADDITIONAL));
+        EXPECT_EQ(*rr, (*iterator)->toText());
+        iterator ++;
+    }
+    EXPECT_TRUE(iterator == response.endSection(Message::SECTION_ADDITIONAL));
+}
+
 TEST_F(QueryTest, exactAddrMatch) {
     // find match rrset, omit additional data which has already been provided
     // in the answer section from the additional.
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+    EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
                           response).process());
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
@@ -315,7 +647,7 @@ TEST_F(QueryTest, exactAddrMatch) {
 TEST_F(QueryTest, apexNSMatch) {
     // find match rrset, omit authority data which has already been provided
     // in the answer section from the authority section.
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"), RRType::NS(),
+    EXPECT_NO_THROW(Query(memory_client, Name("example.com"), RRType::NS(),
                           response).process());
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 0, 3,
@@ -326,11 +658,12 @@ TEST_F(QueryTest, apexNSMatch) {
 TEST_F(QueryTest, exactAnyMatch) {
     // find match rrset, omit additional data which has already been provided
     // in the answer section from the additional.
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("noglue.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("noglue.example.com"),
                           RRType::ANY(), response).process());
 
-    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 2,
-                  "noglue.example.com. 3600 IN A 192.0.2.53\n",
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 3, 2,
+                  (string("noglue.example.com. 3600 IN A 192.0.2.53\n") +
+                   string(nsec_nxdomain_txt)).c_str(),
                   zone_ns_txt,
                   "glue.delegation.example.com. 3600 IN A 192.0.2.153\n"
                   "glue.delegation.example.com. 3600 IN AAAA 2001:db8::53\n");
@@ -339,36 +672,34 @@ TEST_F(QueryTest, exactAnyMatch) {
 TEST_F(QueryTest, apexAnyMatch) {
     // find match rrset, omit additional data which has already been provided
     // in the answer section from the additional.
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("example.com"),
                           RRType::ANY(), response).process());
-    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 0, 3,
-                  "example.com. 3600 IN SOA . . 0 0 0 0 0\n"
-                  "example.com. 3600 IN NS glue.delegation.example.com.\n"
-                  "example.com. 3600 IN NS noglue.example.com.\n"
-                  "example.com. 3600 IN NS example.net.\n",
-                  NULL, ns_addrs_txt, mock_zone->getOrigin());
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 5, 0, 3,
+                  (string(soa_txt) + string(zone_ns_txt) +
+                   string(nsec_apex_txt)).c_str(),
+                  NULL, ns_addrs_txt, mock_finder->getOrigin());
 }
 
 TEST_F(QueryTest, mxANYMatch) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("mx.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("mx.example.com"),
                           RRType::ANY(), response).process());
-    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
-                  mx_txt, zone_ns_txt,
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 4, 3, 4,
+                  (string(mx_txt) + string(nsec_mx_txt)).c_str(), zone_ns_txt,
                   (string(ns_addrs_txt) + string(www_a_txt)).c_str());
 }
 
 TEST_F(QueryTest, glueANYMatch) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
                           RRType::ANY(), response).process());
     responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
                   NULL, delegation_txt, ns_addrs_txt);
 }
 
 TEST_F(QueryTest, nodomainANY) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"),
                           RRType::ANY(), response).process());
     responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
-                  NULL, soa_txt, NULL, mock_zone->getOrigin());
+                  NULL, soa_txt, NULL, mock_finder->getOrigin());
 }
 
 // This tests that when we need to look up Zone's apex NS records for
@@ -376,15 +707,15 @@ TEST_F(QueryTest, nodomainANY) {
 // throw in that case.
 TEST_F(QueryTest, noApexNS) {
     // Disable apex NS record
-    mock_zone->setApexNSFlag(false);
+    mock_finder->setApexNSFlag(false);
 
-    EXPECT_THROW(Query(memory_datasrc, Name("noglue.example.com"), qtype,
+    EXPECT_THROW(Query(memory_client, Name("noglue.example.com"), qtype,
                        response).process(), Query::NoApexNS);
     // We don't look into the response, as it threw
 }
 
 TEST_F(QueryTest, delegation) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("delegation.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("delegation.example.com"),
                           qtype, response).process());
 
     responseCheck(response, Rcode::NOERROR(), 0, 0, 4, 3,
@@ -392,18 +723,309 @@ TEST_F(QueryTest, delegation) {
 }
 
 TEST_F(QueryTest, nxdomain) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("nxdomain.example.com"), qtype,
+    EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
                           response).process());
     responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 1, 0,
-                  NULL, soa_txt, NULL, mock_zone->getOrigin());
+                  NULL, soa_txt, NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSEC) {
+    // NXDOMAIN with DNSSEC proof.  We should have SOA, NSEC that proves
+    // NXDOMAIN and NSEC that proves nonexistence of matching wildcard,
+    // as well as their RRSIGs.
+    EXPECT_NO_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+                          response, true).process());
+    responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
+                  NULL, (string(soa_txt) +
+                         string("example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("SOA") + "\n" +
+                         string(nsec_nxdomain_txt) + "\n" +
+                         string("noglue.example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC") + "\n" +
+                         string(nsec_apex_txt) + "\n" +
+                         string("example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC")).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSEC2) {
+    // See comments about no_txt.  In this case the best possible wildcard
+    // is derived from the next domain of the NSEC that proves NXDOMAIN, and
+    // the NSEC to provide the non existence of wildcard is different from
+    // the first NSEC.
+    Query(memory_client, Name("(.no.example.com"), qtype,
+          response, true).process();
+    responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 6, 0,
+                  NULL, (string(soa_txt) +
+                         string("example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("SOA") + "\n" +
+                         string(nsec_mx_txt) + "\n" +
+                         string("mx.example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC") + "\n" +
+                         string(nsec_no_txt) + "\n" +
+                         string(").no.example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC")).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainWithNSECDuplicate) {
+    // See comments about nz_txt.  In this case we only need one NSEC,
+    // which proves both NXDOMAIN and the non existence of wildcard.
+    Query(memory_client, Name("nx.no.example.com"), qtype,
+          response, true).process();
+    responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 4, 0,
+                  NULL, (string(soa_txt) +
+                         string("example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("SOA") + "\n" +
+                         string(nsec_no_txt) + "\n" +
+                         string(").no.example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC")).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC1) {
+    // ZoneFinder::find() returns NXDOMAIN with non NSEC RR.
+    mock_finder->setNSECResult(Name("badnsec.example.com"),
+                               ZoneFinder::NXDOMAIN,
+                               mock_finder->delegation_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("badnsec.example.com"), qtype,
+                       response, true).process(),
+                 std::bad_cast);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC2) {
+    // ZoneFinder::find() returns NXDOMAIN with an empty NSEC RR.
+    mock_finder->setNSECResult(Name("emptynsec.example.com"),
+                               ZoneFinder::NXDOMAIN,
+                               mock_finder->empty_nsec_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("emptynsec.example.com"), qtype,
+                       response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC3) {
+    // "no-wildcard proof" returns SUCCESS.  it should be NXDOMAIN.
+    mock_finder->setNSECResult(Name("*.example.com"),
+                               ZoneFinder::SUCCESS,
+                               mock_finder->delegation_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+                       response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC4) {
+    // "no-wildcard proof" doesn't return RRset.
+    mock_finder->setNSECResult(Name("*.example.com"),
+                               ZoneFinder::NXDOMAIN, ConstRRsetPtr());
+    EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+                       response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC5) {
+    // "no-wildcard proof" returns non NSEC.
+    mock_finder->setNSECResult(Name("*.example.com"),
+                               ZoneFinder::NXDOMAIN,
+                               mock_finder->delegation_rrset_);
+    // This is a bit odd, but we'll simply include the returned RRset.
+    Query(memory_client, Name("nxdomain.example.com"), qtype,
+          response, true).process();
+    responseCheck(response, Rcode::NXDOMAIN(), AA_FLAG, 0, 8, 0,
+                  NULL, (string(soa_txt) +
+                         string("example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("SOA") + "\n" +
+                         string(nsec_nxdomain_txt) + "\n" +
+                         string("noglue.example.com. 3600 IN RRSIG ") +
+                         getCommonRRSIGText("NSEC") + "\n" +
+                         delegation_txt).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxdomainBadNSEC6) {
+    // "no-wildcard proof" returns empty NSEC.
+    mock_finder->setNSECResult(Name("*.example.com"),
+                               ZoneFinder::NXDOMAIN,
+                               mock_finder->empty_nsec_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"), qtype,
+                       response, true).process(),
+                 Query::BadNSEC);
 }
 
 TEST_F(QueryTest, nxrrset) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("www.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("www.example.com"),
                           RRType::TXT(), response).process());
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
-                  NULL, soa_txt, NULL, mock_zone->getOrigin());
+                  NULL, soa_txt, NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxrrsetWithNSEC) {
+    // NXRRSET with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXRRSET and their RRSIGs.
+    Query(memory_client, Name("www.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_www_txt) + "\n" +
+                   string("www.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, emptyNameWithNSEC) {
+    // Empty non terminal with DNSSEC proof.  This is one of the cases of
+    // Section 3.1.3.2 of RFC4035.
+    // mx.example.com. NSEC ).no.example.com. proves that no.example.com. is
+    // an empty non-terminal node.  Note that it also implicitly proves there
+    // should be no closer wildcard match (because the empty name is an
+    // exact match), so we only need one NSEC.
+    // From the point of the Query::process(), this is actually no different
+    // from the other NXRRSET case, but we check that explicitly just in case.
+    Query(memory_client, Name("no.example.com"), RRType::A(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_mx_txt) + "\n" +
+                   string("mx.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")).c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, nxrrsetWithoutNSEC) {
+    // NXRRSET with DNSSEC proof requested, but there's no NSEC at that node.
+    // This is an unexpected event (if the zone is supposed to be properly
+    // signed with NSECs), but we accept and ignore the oddity.
+    Query(memory_client, Name("nonsec.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 2, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNSEC) {
+    // The qname matches *.wild.example.com.  The response should contain
+    // an NSEC that proves the non existence of a closer name.
+    Query(memory_client, Name("www.wild.example.com"), RRType::A(), response,
+          true).process();
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 6, 6,
+                  (string(wild_txt).replace(0, 1, "www") +
+                   string("www.wild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("A") + "\n").c_str(),
+                  (zone_ns_txt + string("example.com. 3600 IN RRSIG NS 5 "
+                                        "3 3600 20000101000000 "
+                                        "20000201000000 12345 "
+                                        "example.com. FAKEFAKEFAKE\n") +
+                   string(nsec_wild_txt) +
+                   string("*.wild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC") + "\n").c_str(),
+                  NULL, // we are not interested in additionals in this test
+                  mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, CNAMEwildNSEC) {
+    // Similar to the previous case, but the matching wildcard record is
+    // CNAME.
+    Query(memory_client, Name("www.cnamewild.example.com"), RRType::A(),
+          response, true).process();
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 2, 0,
+                  (string(cnamewild_txt).replace(0, 1, "www") +
+                   string("www.cnamewild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("CNAME") + "\n").c_str(),
+                  (string(nsec_cnamewild_txt) +
+                   string("*.cnamewild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC") + "\n").c_str(),
+                  NULL, // we are not interested in additionals in this test
+                  mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, badWildcardProof1) {
+    // Unexpected case in wildcard proof: ZoneFinder::find() returns SUCCESS
+    // when NXDOMAIN is expected.
+    mock_finder->setNSECResult(Name("www.wild.example.com"),
+                               ZoneFinder::SUCCESS,
+                               mock_finder->delegation_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+                       RRType::A(), response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof2) {
+    // "wildcard proof" doesn't return RRset.
+    mock_finder->setNSECResult(Name("www.wild.example.com"),
+                               ZoneFinder::NXDOMAIN, ConstRRsetPtr());
+    EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+                       RRType::A(), response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, badWildcardProof3) {
+    // "wildcard proof" returns empty NSEC.
+    mock_finder->setNSECResult(Name("www.wild.example.com"),
+                               ZoneFinder::NXDOMAIN,
+                               mock_finder->empty_nsec_rrset_);
+    EXPECT_THROW(Query(memory_client, Name("www.wild.example.com"),
+                       RRType::A(), response, true).process(),
+                 Query::BadNSEC);
+}
+
+TEST_F(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXRRSET and their RRSIGs. In this case we only need one NSEC,
+    // which proves both NXDOMAIN and the non existence of the wildcard's RRsets.
+    Query(memory_client, Name("www.wild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt) +
+                   string("*.wild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXRRSET and their RRSIGs. In this case we need two NSEC RRs,
+    // one proves NXDOMAIN and the other proves the non existence of the wildcard's RRsets.
+    Query(memory_client, Name("www1.uwild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt_nxrrset) +
+                   string("*.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_wild_txt_next) +
+                   string("www.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC") + "\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardEmptyWithNSEC) {
+    // WILDCARD_EMPTY with DNSSEC proof.  We should have SOA, NSEC that proves the
+    // NXDOMAIN and their RRSIGs. In this case we need two NSEC RRs,
+    // one proves NXDOMAIN and the other proves the non existence of the wildcard.
+    Query(memory_client, Name("a.t.example.com"), RRType::A(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_empty_prev_txt) +
+                   string("t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_empty_txt) +
+                   string("b.*.t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
 }
 
 /*
@@ -412,22 +1034,22 @@ TEST_F(QueryTest, nxrrset) {
  */
 TEST_F(QueryTest, noSOA) {
     // disable zone's SOA RR.
-    mock_zone->setSOAFlag(false);
+    mock_finder->setSOAFlag(false);
 
     // The NX Domain
-    EXPECT_THROW(Query(memory_datasrc, Name("nxdomain.example.com"),
+    EXPECT_THROW(Query(memory_client, Name("nxdomain.example.com"),
                        qtype, response).process(), Query::NoSOA);
     // Of course, we don't look into the response, as it threw
 
     // NXRRSET
-    EXPECT_THROW(Query(memory_datasrc, Name("nxrrset.example.com"),
+    EXPECT_THROW(Query(memory_client, Name("nxrrset.example.com"),
                        qtype, response).process(), Query::NoSOA);
 }
 
 TEST_F(QueryTest, noMatchZone) {
     // there's a zone in the memory datasource but it doesn't match the qname.
     // should result in REFUSED.
-    Query(memory_datasrc, Name("example.org"), qtype, response).process();
+    Query(memory_client, Name("example.org"), qtype, response).process();
     EXPECT_EQ(Rcode::REFUSED(), response.getRcode());
 }
 
@@ -438,7 +1060,7 @@ TEST_F(QueryTest, noMatchZone) {
  * A record, other to unknown out of zone one.
  */
 TEST_F(QueryTest, MX) {
-    Query(memory_datasrc, Name("mx.example.com"), RRType::MX(),
+    Query(memory_client, Name("mx.example.com"), RRType::MX(),
           response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 3, 3, 4,
@@ -452,7 +1074,7 @@ TEST_F(QueryTest, MX) {
  * This should not trigger the additional processing for the exchange.
  */
 TEST_F(QueryTest, MXAlias) {
-    Query(memory_datasrc, Name("cnamemx.example.com"), RRType::MX(),
+    Query(memory_client, Name("cnamemx.example.com"), RRType::MX(),
           response).process();
 
     // there shouldn't be any additional RRs for the exchanges (we have 3
@@ -472,7 +1094,7 @@ TEST_F(QueryTest, MXAlias) {
  * returned.
  */
 TEST_F(QueryTest, CNAME) {
-    Query(memory_datasrc, Name("cname.example.com"), RRType::A(),
+    Query(memory_client, Name("cname.example.com"), RRType::A(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -482,7 +1104,7 @@ TEST_F(QueryTest, CNAME) {
 TEST_F(QueryTest, explicitCNAME) {
     // same owner name as the CNAME test but explicitly query for CNAME RR.
     // expect the same response as we don't provide a full chain yet.
-    Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+    Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -494,7 +1116,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
     // note: with chaining, what should be expected is not trivial:
     // BIND 9 returns the CNAME in answer and SOA in authority, no additional.
     // NSD returns the CNAME, NS in authority, A/AAAA for NS in additional.
-    Query(memory_datasrc, Name("cname.example.com"), RRType::TXT(),
+    Query(memory_client, Name("cname.example.com"), RRType::TXT(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -503,7 +1125,7 @@ TEST_F(QueryTest, CNAME_NX_RRSET) {
 
 TEST_F(QueryTest, explicitCNAME_NX_RRSET) {
     // same owner name as the NXRRSET test but explicitly query for CNAME RR.
-    Query(memory_datasrc, Name("cname.example.com"), RRType::CNAME(),
+    Query(memory_client, Name("cname.example.com"), RRType::CNAME(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -517,7 +1139,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
     // RCODE being NXDOMAIN.
     // NSD returns the CNAME, NS in authority, A/AAAA for NS in additional,
     // RCODE being NOERROR.
-    Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::A(),
+    Query(memory_client, Name("cnamenxdom.example.com"), RRType::A(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -526,7 +1148,7 @@ TEST_F(QueryTest, CNAME_NX_DOMAIN) {
 
 TEST_F(QueryTest, explicitCNAME_NX_DOMAIN) {
     // same owner name as the NXDOMAIN test but explicitly query for CNAME RR.
-    Query(memory_datasrc, Name("cnamenxdom.example.com"), RRType::CNAME(),
+    Query(memory_client, Name("cnamenxdom.example.com"), RRType::CNAME(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -542,7 +1164,7 @@ TEST_F(QueryTest, CNAME_OUT) {
      * Then the same test should be done with .org included there and
      * see what it does (depends on what we want to do)
      */
-    Query(memory_datasrc, Name("cnameout.example.com"), RRType::A(),
+    Query(memory_client, Name("cnameout.example.com"), RRType::A(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 0, 0,
@@ -551,7 +1173,7 @@ TEST_F(QueryTest, CNAME_OUT) {
 
 TEST_F(QueryTest, explicitCNAME_OUT) {
     // same owner name as the OUT test but explicitly query for CNAME RR.
-    Query(memory_datasrc, Name("cnameout.example.com"), RRType::CNAME(),
+    Query(memory_client, Name("cnameout.example.com"), RRType::CNAME(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -567,7 +1189,7 @@ TEST_F(QueryTest, explicitCNAME_OUT) {
  * pointing to NXRRSET and NXDOMAIN cases (similarly as with CNAME).
  */
 TEST_F(QueryTest, DNAME) {
-    Query(memory_datasrc, Name("www.dname.example.com"), RRType::A(),
+    Query(memory_client, Name("www.dname.example.com"), RRType::A(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -583,7 +1205,7 @@ TEST_F(QueryTest, DNAME) {
  * DNAME.
  */
 TEST_F(QueryTest, DNAME_ANY) {
-    Query(memory_datasrc, Name("www.dname.example.com"), RRType::ANY(),
+    Query(memory_client, Name("www.dname.example.com"), RRType::ANY(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 2, 0, 0,
@@ -592,7 +1214,7 @@ TEST_F(QueryTest, DNAME_ANY) {
 
 // Test when we ask for DNAME explicitly, it does no synthetizing.
 TEST_F(QueryTest, explicitDNAME) {
-    Query(memory_datasrc, Name("dname.example.com"), RRType::DNAME(),
+    Query(memory_client, Name("dname.example.com"), RRType::DNAME(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -604,7 +1226,7 @@ TEST_F(QueryTest, explicitDNAME) {
  * the CNAME, it should return the RRset.
  */
 TEST_F(QueryTest, DNAME_A) {
-    Query(memory_datasrc, Name("dname.example.com"), RRType::A(),
+    Query(memory_client, Name("dname.example.com"), RRType::A(),
         response).process();
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 1, 3, 3,
@@ -616,11 +1238,11 @@ TEST_F(QueryTest, DNAME_A) {
  * It should not synthetize the CNAME.
  */
 TEST_F(QueryTest, DNAME_NX_RRSET) {
-    EXPECT_NO_THROW(Query(memory_datasrc, Name("dname.example.com"),
+    EXPECT_NO_THROW(Query(memory_client, Name("dname.example.com"),
         RRType::TXT(), response).process());
 
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 1, 0,
-        NULL, soa_txt, NULL, mock_zone->getOrigin());
+        NULL, soa_txt, NULL, mock_finder->getOrigin());
 }
 
 /*
@@ -636,7 +1258,7 @@ TEST_F(QueryTest, LongDNAME) {
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
         "dname.example.com.");
-    EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+    EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
         response).process());
 
     responseCheck(response, Rcode::YXDOMAIN(), AA_FLAG, 1, 0, 0,
@@ -655,7 +1277,7 @@ TEST_F(QueryTest, MaxLenDNAME) {
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
         "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."
         "dname.example.com.");
-    EXPECT_NO_THROW(Query(memory_datasrc, longname, RRType::A(),
+    EXPECT_NO_THROW(Query(memory_client, longname, RRType::A(),
         response).process());
 
     // Check the answer is OK
diff --git a/src/bin/auth/tests/statistics_unittest.cc b/src/bin/auth/tests/statistics_unittest.cc
index f8b83f8..3f19f91 100644
--- a/src/bin/auth/tests/statistics_unittest.cc
+++ b/src/bin/auth/tests/statistics_unittest.cc
@@ -16,6 +16,8 @@
 
 #include <gtest/gtest.h>
 
+#include <boost/bind.hpp>
+
 #include <cc/data.h>
 #include <cc/session.h>
 
@@ -76,6 +78,13 @@ protected:
     }
     MockSession statistics_session_;
     AuthCounters counters;
+    // no need to be inherited from the original class here.
+    class MockModuleSpec {
+    public:
+        bool validateStatistics(ConstElementPtr, const bool valid) const
+            { return (valid); }
+    };
+    MockModuleSpec module_spec_;
 };
 
 void
@@ -180,7 +189,7 @@ TEST_F(AuthCountersTest, submitStatisticsWithException) {
     statistics_session_.setThrowSessionTimeout(false);
 }
 
-TEST_F(AuthCountersTest, submitStatistics) {
+TEST_F(AuthCountersTest, submitStatisticsWithoutValidator) {
     // Submit statistics data.
     // Validate if it submits correct data.
 
@@ -200,12 +209,69 @@ TEST_F(AuthCountersTest, submitStatistics) {
     // Command is "set".
     EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
                          ->get(0)->stringValue());
+    EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+                         ->get(1)->get("owner")->stringValue());
     ConstElementPtr statistics_data = statistics_session_.sent_msg
                                           ->get("command")->get(1)
-                                          ->get("stats_data");
+                                          ->get("data");
     // UDP query counter is 2 and TCP query counter is 1.
-    EXPECT_EQ(2, statistics_data->get("auth.queries.udp")->intValue());
-    EXPECT_EQ(1, statistics_data->get("auth.queries.tcp")->intValue());
+    EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+    EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
 }
 
+TEST_F(AuthCountersTest, submitStatisticsWithValidator) {
+
+    //a validator for the unittest
+    AuthCounters::validator_type validator;
+    ConstElementPtr el;
+
+    // Submit statistics data with correct statistics validator.
+    validator = boost::bind(
+        &AuthCountersTest::MockModuleSpec::validateStatistics,
+        &module_spec_, _1, true);
+
+    EXPECT_TRUE(validator(el));
+
+    // register validator to AuthCounters
+    counters.registerStatisticsValidator(validator);
+
+    // Counters should be initialized to 0.
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_UDP_QUERY));
+    EXPECT_EQ(0, counters.getCounter(AuthCounters::SERVER_TCP_QUERY));
+
+    // UDP query counter is set to 2.
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
+    counters.inc(AuthCounters::SERVER_UDP_QUERY);
+    // TCP query counter is set to 1.
+    counters.inc(AuthCounters::SERVER_TCP_QUERY);
+
+    // checks the value returned by submitStatistics
+    EXPECT_TRUE(counters.submitStatistics());
+
+    // Destination is "Stats".
+    EXPECT_EQ("Stats", statistics_session_.msg_destination);
+    // Command is "set".
+    EXPECT_EQ("set", statistics_session_.sent_msg->get("command")
+                         ->get(0)->stringValue());
+    EXPECT_EQ("Auth", statistics_session_.sent_msg->get("command")
+                         ->get(1)->get("owner")->stringValue());
+    ConstElementPtr statistics_data = statistics_session_.sent_msg
+                                          ->get("command")->get(1)
+                                          ->get("data");
+    // UDP query counter is 2 and TCP query counter is 1.
+    EXPECT_EQ(2, statistics_data->get("queries.udp")->intValue());
+    EXPECT_EQ(1, statistics_data->get("queries.tcp")->intValue());
+
+    // Submit statistics data with incorrect statistics validator.
+    validator = boost::bind(
+        &AuthCountersTest::MockModuleSpec::validateStatistics,
+        &module_spec_, _1, false);
+
+    EXPECT_FALSE(validator(el));
+
+    counters.registerStatisticsValidator(validator);
+
+    // checks the value returned by submitStatistics
+    EXPECT_FALSE(counters.submitStatistics());
+}
 }
diff --git a/src/bin/auth/tests/testdata/Makefile.am b/src/bin/auth/tests/testdata/Makefile.am
index f6f1f27..c86722f 100644
--- a/src/bin/auth/tests/testdata/Makefile.am
+++ b/src/bin/auth/tests/testdata/Makefile.am
@@ -23,4 +23,4 @@ EXTRA_DIST += example.com
 EXTRA_DIST += example.sqlite3
 
 .spec.wire:
-	$(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/bin/bind10/Makefile.am b/src/bin/bind10/Makefile.am
index 126c429..69ea256 100644
--- a/src/bin/bind10/Makefile.am
+++ b/src/bin/bind10/Makefile.am
@@ -1,10 +1,16 @@
 SUBDIRS = . tests
 
 sbin_SCRIPTS = bind10
-CLEANFILES = bind10 bind10.pyc bind10_messages.py bind10_messages.pyc
+CLEANFILES = bind10 bind10_src.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.pyc
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
-pyexec_DATA = bind10_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+noinst_SCRIPTS = run_bind10.sh
 
 bind10dir = $(pkgdatadir)
 bind10_DATA = bob.spec
@@ -20,13 +26,15 @@ bind10.8: bind10.xml
 
 endif
 
-bind10_messages.py: bind10_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/bind10/bind10_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py : bind10_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/bind10_messages.mes
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-bind10: bind10.py
+bind10: bind10_src.py $(PYTHON_LOGMSGPKG_DIR)/work/bind10_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
-	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10.py >$@
+	       -e "s|@@LIBDIR@@|$(libdir)|" \
+	       -e "s|@@LIBEXECDIR@@|$(pkglibexecdir)|" bind10_src.py >$@
 	chmod a+x $@
 
 pytest:
diff --git a/src/bin/bind10/TODO b/src/bin/bind10/TODO
index eb0abcd..6f50dbd 100644
--- a/src/bin/bind10/TODO
+++ b/src/bin/bind10/TODO
@@ -1,19 +1,13 @@
 - Read msgq configuration from configuration manager (Trac #213)
   https://bind10.isc.org/ticket/213
 - Provide more administrator options:
-  - Get process list
   - Get information on a process (returns list of times started & stopped, 
     plus current information such as PID)
-  - Add a component (not necessary for parking lot, but...)
   - Stop a component
   - Force-stop a component
 - Mechanism to wait for child to start before continuing
-- Way to ask a child to die politely 
-- Start statistics daemon
-- Statistics interaction (?)
 - Use .spec file to define comands
 - Rename "c-channel" stuff to msgq for clarity
-- Use logger
 - Reply to shutdown message?
 - Some sort of group creation so termination signals can be sent to
   children of children processes (if any)
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index d5ab905..c2e44e7 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,12 +2,12 @@
 .\"     Title: bind10
 .\"    Author: [see the "AUTHORS" section]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: March 31, 2011
+.\"      Date: November 23, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "BIND10" "8" "March 31, 2011" "BIND10" "BIND10"
+.TH "BIND10" "8" "November 23, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -22,7 +22,7 @@
 bind10 \- BIND 10 boss process
 .SH "SYNOPSIS"
 .HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
 .SH "DESCRIPTION"
 .PP
 The
@@ -32,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
 .PP
 The arguments are as follows:
 .PP
-\fB\-\-brittle\fR
-.RS 4
-Shutdown if any of the child processes of
-\fBbind10\fR
-exit\&. This is intended to help developers debug the server, and should not be used in production\&.
-.RE
-.PP
 \fB\-c\fR \fIconfig\-filename\fR, \fB\-\-config\-file\fR \fIconfig\-filename\fR
 .RS 4
 The configuration filename to use\&. Can be either absolute or relative to data path\&. In case it is absolute, value of data path is not considered\&.
@@ -107,6 +100,221 @@ Display more about what is going on for
 \fBbind10\fR
 and its child processes\&.
 .RE
+.PP
+\fB\-w\fR \fIwait_time\fR, \fB\-\-wait\fR \fIwait_time\fR
+.RS 4
+Sets the amount of time that BIND 10 will wait for the configuration manager (a key component of BIND 10) to initialize itself before abandoning the start up and terminating with an error\&. The wait_time is specified in seconds and has a default value of 10\&.
+.RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The configuration provides settings for components for
+\fBbind10\fR
+to manage under
+\fI/Boss/components/\fR\&. The default elements are:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-auth\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-cmdctl\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/setuid\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\-httpd\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrin\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrout\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-zonemgr\fR
+.RE
+.PP
+(Note that the startup of
+\fBb10\-sockcreator\fR,
+\fBb10\-cfgmgr\fR, and
+\fBb10\-msgq\fR
+is not configurable\&. It is hardcoded and
+\fBbind10\fR
+will not run without them\&.)
+.PP
+These named sets (listed above) contain the following settings:
+.PP
+\fIaddress\fR
+.RS 4
+The name used for communicating to it on the message bus\&.
+.RE
+.PP
+\fIkind\fR
+.RS 4
+This defines how required a component is\&. The possible settings for
+\fIkind\fR
+are:
+\fIcore\fR
+(system won\'t start if it won\'t start and
+\fBbind10\fR
+will shutdown if a
+\(lqcore\(rq
+component crashes),
+\fIdispensable\fR
+(\fBbind10\fR
+will restart failing component), and
+\fIneeded\fR
+(\fBbind10\fR
+will shutdown if component won\'t initially start, but if crashes later, it will attempt to restart)\&. This setting is required\&.
+.RE
+.PP
+\fIpriority\fR
+.RS 4
+This is an integer\&.
+\fBbind10\fR
+will start the components with largest priority numbers first\&.
+.RE
+.PP
+\fIprocess\fR
+.RS 4
+This is the filename of the executable to be started\&. If not defined, then
+\fBbind10\fR
+will use the component name instead\&.
+.RE
+.PP
+\fIspecial\fR
+.RS 4
+This defines if the component is started a special way\&.
+.RE
+.PP
+The
+\fIBoss\fR
+configuration commands are:
+.PP
+
+\fBgetstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon\&. This is an internal command and not exposed to the administrator\&.
+
+.PP
+
+\fBping\fR
+is used to check the connection with the
+\fBbind10\fR
+daemon\&. It returns the text
+\(lqpong\(rq\&.
+.PP
+
+\fBsendstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon immediately\&.
+.PP
+
+\fBshow_processes\fR
+lists the current processes managed by
+\fBbind10\fR\&. The output is an array in JSON format containing the process ID and the name for each\&.
+
+
+.PP
+
+\fBshutdown\fR
+tells
+\fBbind10\fR
+to shutdown the BIND 10 servers\&. It will tell each process it manages to shutdown and, when complete,
+\fBbind10\fR
+will exit\&.
+.SH "STATISTICS DATA"
+.PP
+The statistics data collected by the
+\fBb10\-stats\fR
+daemon include:
+.PP
+bind10\&.boot_time
+.RS 4
+The date and time that the
+\fBbind10\fR
+process started\&. This is represented in ISO 8601 format\&.
+.RE
 .SH "SEE ALSO"
 .PP
 
diff --git a/src/bin/bind10/bind10.py.in b/src/bin/bind10/bind10.py.in
deleted file mode 100755
index a624383..0000000
--- a/src/bin/bind10/bind10.py.in
+++ /dev/null
@@ -1,1037 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010,2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-This file implements the Boss of Bind (BoB, or bob) program.
-
-Its purpose is to start up the BIND 10 system, and then manage the
-processes, by starting and stopping processes, plus restarting
-processes that exit.
-
-To start the system, it first runs the c-channel program (msgq), then
-connects to that. It then runs the configuration manager, and reads
-its own configuration. Then it proceeds to starting other modules.
-
-The Python subprocess module is used for starting processes, but
-because this is not efficient for managing groups of processes,
-SIGCHLD signals are caught and processed using the signal module.
-
-Most of the logic is contained in the BoB class. However, since Python
-requires that signal processing happen in the main thread, we do
-signal handling outside of that class, in the code running for
-__main__.
-"""
-
-import sys; sys.path.append ('@@PYTHONPATH@@')
-import os
-
-# If B10_FROM_SOURCE is set in the environment, we use data files
-# from a directory relative to that, otherwise we use the ones
-# installed on the system
-if "B10_FROM_SOURCE" in os.environ:
-    SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
-else:
-    PREFIX = "@prefix@"
-    DATAROOTDIR = "@datarootdir@"
-    SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-    
-import subprocess
-import signal
-import re
-import errno
-import time
-import select
-import random
-import socket
-from optparse import OptionParser, OptionValueError
-import io
-import pwd
-import posix
-
-import isc.cc
-import isc.util.process
-import isc.net.parse
-import isc.log
-from bind10_messages import *
-
-isc.log.init("b10-boss")
-logger = isc.log.Logger("boss")
-
-# Pending system-wide debug level definitions, the ones we
-# use here are hardcoded for now
-DBG_PROCESS = 10
-DBG_COMMANDS = 30
-
-# Assign this process some longer name
-isc.util.process.rename(sys.argv[0])
-
-# This is the version that gets displayed to the user.
-# The VERSION string consists of the module name, the module version
-# number, and the overall BIND 10 version number (set in configure.ac).
-VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
-
-# This is for bind10.boottime of stats module
-_BASETIME = time.gmtime()
-
-class RestartSchedule:
-    """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if 
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
-  * If a process was been running for >=10 seconds, we restart it
-    right away.
-  * If a process was running for <10 seconds, we wait until 10 seconds
-    after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
-    def __init__(self, restart_frequency=10.0):
-        self.restart_frequency = restart_frequency
-        self.run_start_time = None
-        self.run_stop_time = None
-        self.restart_time = None
-    
-    def set_run_start_time(self, when=None):
-        if when is None:
-            when = time.time()
-        self.run_start_time = when
-        sigma = self.restart_frequency * 0.05
-        self.restart_time = when + random.normalvariate(self.restart_frequency, 
-                                                        sigma)
-
-    def set_run_stop_time(self, when=None):
-        """We don't actually do anything with stop time now, but it 
-        might be useful for future algorithms."""
-        if when is None:
-            when = time.time()
-        self.run_stop_time = when
-
-    def get_restart_time(self, when=None):
-        if when is None:
-            when = time.time()
-        return max(when, self.restart_time)
-
-class ProcessInfoError(Exception): pass
-
-class ProcessInfo:
-    """Information about a process"""
-
-    dev_null = open(os.devnull, "w")
-
-    def __init__(self, name, args, env={}, dev_null_stdout=False,
-                 dev_null_stderr=False, uid=None, username=None):
-        self.name = name 
-        self.args = args
-        self.env = env
-        self.dev_null_stdout = dev_null_stdout
-        self.dev_null_stderr = dev_null_stderr
-        self.restart_schedule = RestartSchedule()
-        self.uid = uid
-        self.username = username
-        self.process = None
-        self.pid = None
-
-    def _preexec_work(self):
-        """Function used before running a program that needs to run as a
-        different user."""
-        # First, put us into a separate process group so we don't get
-        # SIGINT signals on Ctrl-C (the boss will shut everthing down by
-        # other means).
-        os.setpgrp()
-        # Second, set the user ID if one has been specified
-        if self.uid is not None:
-            try:
-                posix.setuid(self.uid)
-            except OSError as e:
-                if e.errno == errno.EPERM:
-                    # if we failed to change user due to permission report that
-                    raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
-                else:
-                    # otherwise simply re-raise whatever error we found
-                    raise
-
-    def _spawn(self):
-        if self.dev_null_stdout:
-            spawn_stdout = self.dev_null
-        else:
-            spawn_stdout = None
-        if self.dev_null_stderr:
-            spawn_stderr = self.dev_null
-        else:
-            spawn_stderr = None
-        # Environment variables for the child process will be a copy of those
-        # of the boss process with any additional specific variables given
-        # on construction (self.env).
-        spawn_env = os.environ
-        spawn_env.update(self.env)
-        if 'B10_FROM_SOURCE' not in os.environ:
-            spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
-        self.process = subprocess.Popen(self.args,
-                                        stdin=subprocess.PIPE,
-                                        stdout=spawn_stdout,
-                                        stderr=spawn_stderr,
-                                        close_fds=True,
-                                        env=spawn_env,
-                                        preexec_fn=self._preexec_work)
-        self.pid = self.process.pid
-        self.restart_schedule.set_run_start_time()
-
-    # spawn() and respawn() are the same for now, but in the future they
-    # may have different functionality
-    def spawn(self):
-        self._spawn()
-
-    def respawn(self):
-        self._spawn()
-
-class CChannelConnectError(Exception): pass
-
-class BoB:
-    """Boss of BIND class."""
-    
-    def __init__(self, msgq_socket_file=None, data_path=None,
-    config_filename=None, nocache=False, verbose=False, setuid=None,
-    username=None, cmdctl_port=None, brittle=False):
-        """
-            Initialize the Boss of BIND. This is a singleton (only one can run).
-        
-            The msgq_socket_file specifies the UNIX domain socket file that the
-            msgq process listens on.  If verbose is True, then the boss reports
-            what it is doing.
-
-            Data path and config filename are passed trough to config manager
-            (if provided) and specify the config file to be used.
-
-            The cmdctl_port is passed to cmdctl and specify on which port it
-            should listen.
-        """
-        self.cc_session = None
-        self.ccs = None
-        self.cfg_start_auth = True
-        self.cfg_start_resolver = False
-        self.cfg_start_dhcp6 = False
-        self.cfg_start_dhcp4 = False
-        self.started_auth_family = False
-        self.started_resolver_family = False
-        self.curproc = None
-        self.dead_processes = {}
-        self.msgq_socket_file = msgq_socket_file
-        self.nocache = nocache
-        self.processes = {}
-        self.expected_shutdowns = {}
-        self.runnable = False
-        self.uid = setuid
-        self.username = username
-        self.verbose = verbose
-        self.data_path = data_path
-        self.config_filename = config_filename
-        self.cmdctl_port = cmdctl_port
-        self.brittle = brittle
-
-    def config_handler(self, new_config):
-        # If this is initial update, don't do anything now, leave it to startup
-        if not self.runnable:
-            return
-        # Now we declare few functions used only internally here. Besides the
-        # benefit of not polluting the name space, they are closures, so we
-        # don't need to pass some variables
-        def start_stop(name, started, start, stop):
-            if not'start_' + name in new_config:
-                return
-            if new_config['start_' + name]:
-                if not started:
-                    if self.uid is not None:
-                        logger.info(BIND10_START_AS_NON_ROOT, name)
-                    start()
-            else:
-                stop()
-        # These four functions are passed to start_stop (smells like functional
-        # programming little bit)
-        def resolver_on():
-            self.start_resolver(self.c_channel_env)
-            self.started_resolver_family = True
-        def resolver_off():
-            self.stop_resolver()
-            self.started_resolver_family = False
-        def auth_on():
-            self.start_auth(self.c_channel_env)
-            self.start_xfrout(self.c_channel_env)
-            self.start_xfrin(self.c_channel_env)
-            self.start_zonemgr(self.c_channel_env)
-            self.started_auth_family = True
-        def auth_off():
-            self.stop_zonemgr()
-            self.stop_xfrin()
-            self.stop_xfrout()
-            self.stop_auth()
-            self.started_auth_family = False
-
-        # The real code of the config handler function follows here
-        logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
-                     new_config)
-        start_stop('resolver', self.started_resolver_family, resolver_on,
-            resolver_off)
-        start_stop('auth', self.started_auth_family, auth_on, auth_off)
-
-        answer = isc.config.ccsession.create_answer(0)
-        return answer
-
-    def get_processes(self):
-        pids = list(self.processes.keys())
-        pids.sort()
-        process_list = [ ]
-        for pid in pids:
-            process_list.append([pid, self.processes[pid].name])
-        return process_list
-
-    def command_handler(self, command, args):
-        logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
-        answer = isc.config.ccsession.create_answer(1, "command not implemented")
-        if type(command) != str:
-            answer = isc.config.ccsession.create_answer(1, "bad command")
-        else:
-            if command == "shutdown":
-                self.runnable = False
-                answer = isc.config.ccsession.create_answer(0)
-            elif command == "sendstats":
-                # send statistics data to the stats daemon immediately
-                cmd = isc.config.ccsession.create_command(
-                    'set', { "stats_data": {
-                            'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
-                            }})
-                seq = self.cc_session.group_sendmsg(cmd, 'Stats')
-                self.cc_session.group_recvmsg(True, seq)
-                answer = isc.config.ccsession.create_answer(0)
-            elif command == "ping":
-                answer = isc.config.ccsession.create_answer(0, "pong")
-            elif command == "show_processes":
-                answer = isc.config.ccsession. \
-                    create_answer(0, self.get_processes())
-            else:
-                answer = isc.config.ccsession.create_answer(1,
-                                                            "Unknown command")
-        return answer
-
-    def kill_started_processes(self):
-        """
-            Called as part of the exception handling when a process fails to
-            start, this runs through the list of started processes, killing
-            each one.  It then clears that list.
-        """
-        logger.info(BIND10_KILLING_ALL_PROCESSES)
-
-        for pid in self.processes:
-            logger.info(BIND10_KILL_PROCESS, self.processes[pid].name)
-            self.processes[pid].process.kill()
-        self.processes = {}
-
-    def read_bind10_config(self):
-        """
-            Reads the parameters associated with the BoB module itself.
-
-            At present these are the components to start although arguably this
-            information should be in the configuration for the appropriate
-            module itself. (However, this would cause difficulty in the case of
-            xfrin/xfrout and zone manager as we don't need to start those if we
-            are not running the authoritative server.)
-        """
-        logger.info(BIND10_READING_BOSS_CONFIGURATION)
-
-        config_data = self.ccs.get_full_config()
-        self.cfg_start_auth = config_data.get("start_auth")
-        self.cfg_start_resolver = config_data.get("start_resolver")
-
-        logger.info(BIND10_CONFIGURATION_START_AUTH, self.cfg_start_auth)
-        logger.info(BIND10_CONFIGURATION_START_RESOLVER, self.cfg_start_resolver)
-
-    def log_starting(self, process, port = None, address = None):
-        """
-            A convenience function to output a "Starting xxx" message if the
-            logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
-            Putting this into a separate method ensures
-            that the output form is consistent across all processes.
-
-            The process name (passed as the first argument) is put into
-            self.curproc, and is used to indicate which process failed to
-            start if there is an error (and is used in the "Started" message
-            on success).  The optional port and address information are
-            appended to the message (if present).
-        """
-        self.curproc = process
-        if port is None and address is None:
-            logger.info(BIND10_STARTING_PROCESS, self.curproc)
-        elif address is None:
-            logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
-                        port)
-        else:
-            logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
-                        self.curproc, address, port)
-
-    def log_started(self, pid = None):
-        """
-            A convenience function to output a 'Started xxxx (PID yyyy)'
-            message.  As with starting_message(), this ensures a consistent
-            format.
-        """
-        if pid is None:
-            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
-        else:
-            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
-
-    # The next few methods start the individual processes of BIND-10.  They
-    # are called via start_all_processes().  If any fail, an exception is
-    # raised which is caught by the caller of start_all_processes(); this kills
-    # processes started up to that point before terminating the program.
-
-    def start_msgq(self, c_channel_env):
-        """
-            Start the message queue and connect to the command channel.
-        """
-        self.log_starting("b10-msgq")
-        c_channel = ProcessInfo("b10-msgq", ["b10-msgq"], c_channel_env,
-                                True, not self.verbose, uid=self.uid,
-                                username=self.username)
-        c_channel.spawn()
-        self.processes[c_channel.pid] = c_channel
-        self.log_started(c_channel.pid)
-
-        # Now connect to the c-channel
-        cc_connect_start = time.time()
-        while self.cc_session is None:
-            # if we have been trying for "a while" give up
-            if (time.time() - cc_connect_start) > 5:
-                raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
-
-            # try to connect, and if we can't wait a short while
-            try:
-                self.cc_session = isc.cc.Session(self.msgq_socket_file)
-            except isc.cc.session.SessionError:
-                time.sleep(0.1)
-
-    def start_cfgmgr(self, c_channel_env):
-        """
-            Starts the configuration manager process
-        """
-        self.log_starting("b10-cfgmgr")
-        args = ["b10-cfgmgr"]
-        if self.data_path is not None:
-            args.append("--data-path=" + self.data_path)
-        if self.config_filename is not None:
-            args.append("--config-filename=" + self.config_filename)
-        bind_cfgd = ProcessInfo("b10-cfgmgr", args,
-                                c_channel_env, uid=self.uid,
-                                username=self.username)
-        bind_cfgd.spawn()
-        self.processes[bind_cfgd.pid] = bind_cfgd
-        self.log_started(bind_cfgd.pid)
-
-        # sleep until b10-cfgmgr is fully up and running, this is a good place
-        # to have a (short) timeout on synchronized groupsend/receive
-        # TODO: replace the sleep by a listen for ConfigManager started
-        # message
-        time.sleep(1)
-
-    def start_ccsession(self, c_channel_env):
-        """
-            Start the CC Session
-
-            The argument c_channel_env is unused but is supplied to keep the
-            argument list the same for all start_xxx methods.
-        """
-        self.log_starting("ccsession")
-        self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION, 
-                                      self.config_handler,
-                                      self.command_handler)
-        self.ccs.start()
-        self.log_started()
-
-    # A couple of utility methods for starting processes...
-
-    def start_process(self, name, args, c_channel_env, port=None, address=None):
-        """
-            Given a set of command arguments, start the process and output
-            appropriate log messages.  If the start is successful, the process
-            is added to the list of started processes.
-
-            The port and address arguments are for log messages only.
-        """
-        self.log_starting(name, port, address)
-        newproc = ProcessInfo(name, args, c_channel_env)
-        newproc.spawn()
-        self.processes[newproc.pid] = newproc
-        self.log_started(newproc.pid)
-
-    def start_simple(self, name, c_channel_env, port=None, address=None):
-        """
-            Most of the BIND-10 processes are started with the command:
-
-                <process-name> [-v]
-
-            ... where -v is appended if verbose is enabled.  This method
-            generates the arguments from the name and starts the process.
-
-            The port and address arguments are for log messages only.
-        """
-        # Set up the command arguments.
-        args = [name]
-        if self.verbose:
-            args += ['-v']
-
-        # ... and start the process
-        self.start_process(name, args, c_channel_env, port, address)
-
-    # The next few methods start up the rest of the BIND-10 processes.
-    # Although many of these methods are little more than a call to
-    # start_simple, they are retained (a) for testing reasons and (b) as a place
-    # where modifications can be made if the process start-up sequence changes
-    # for a given process.
-
-    def start_auth(self, c_channel_env):
-        """
-            Start the Authoritative server
-        """
-        authargs = ['b10-auth']
-        if self.nocache:
-            authargs += ['-n']
-        if self.uid:
-            authargs += ['-u', str(self.uid)]
-        if self.verbose:
-            authargs += ['-v']
-
-        # ... and start
-        self.start_process("b10-auth", authargs, c_channel_env)
-
-    def start_resolver(self, c_channel_env):
-        """
-            Start the Resolver.  At present, all these arguments and switches
-            are pure speculation.  As with the auth daemon, they should be
-            read from the configuration database.
-        """
-        self.curproc = "b10-resolver"
-        # XXX: this must be read from the configuration manager in the future
-        resargs = ['b10-resolver']
-        if self.uid:
-            resargs += ['-u', str(self.uid)]
-        if self.verbose:
-            resargs += ['-v']
-
-        # ... and start
-        self.start_process("b10-resolver", resargs, c_channel_env)
-
-    def start_xfrout(self, c_channel_env):
-        self.start_simple("b10-xfrout", c_channel_env)
-
-    def start_xfrin(self, c_channel_env):
-        self.start_simple("b10-xfrin", c_channel_env)
-
-    def start_zonemgr(self, c_channel_env):
-        self.start_simple("b10-zonemgr", c_channel_env)
-
-    def start_stats(self, c_channel_env):
-        self.start_simple("b10-stats", c_channel_env)
-
-    def start_stats_httpd(self, c_channel_env):
-        self.start_simple("b10-stats-httpd", c_channel_env)
-
-    def start_dhcp6(self, c_channel_env):
-        self.start_simple("b10-dhcp6", c_channel_env)
-
-    def start_cmdctl(self, c_channel_env):
-        """
-            Starts the command control process
-        """
-        args = ["b10-cmdctl"]
-        if self.cmdctl_port is not None:
-            args.append("--port=" + str(self.cmdctl_port))
-        self.start_process("b10-cmdctl", args, c_channel_env, self.cmdctl_port)
-
-    def start_all_processes(self):
-        """
-            Starts up all the processes.  Any exception generated during the
-            starting of the processes is handled by the caller.
-        """
-        c_channel_env = self.c_channel_env
-        self.start_msgq(c_channel_env)
-        self.start_cfgmgr(c_channel_env)
-        self.start_ccsession(c_channel_env)
-
-        # Extract the parameters associated with Bob.  This can only be
-        # done after the CC Session is started.
-        self.read_bind10_config()
-
-        # Continue starting the processes.  The authoritative server (if
-        # selected):
-        if self.cfg_start_auth:
-            self.start_auth(c_channel_env)
-
-        # ... and resolver (if selected):
-        if self.cfg_start_resolver:
-            self.start_resolver(c_channel_env)
-            self.started_resolver_family = True
-
-        # Everything after the main components can run as non-root.
-        # TODO: this is only temporary - once the privileged socket creator is
-        # fully working, nothing else will run as root.
-        if self.uid is not None:
-            posix.setuid(self.uid)
-
-        # xfrin/xfrout and the zone manager are only meaningful if the
-        # authoritative server has been started.
-        if self.cfg_start_auth:
-            self.start_xfrout(c_channel_env)
-            self.start_xfrin(c_channel_env)
-            self.start_zonemgr(c_channel_env)
-            self.started_auth_family = True
-
-        # ... and finally start the remaining processes
-        self.start_stats(c_channel_env)
-        self.start_stats_httpd(c_channel_env)
-        self.start_cmdctl(c_channel_env)
-
-        if self.cfg_start_dhcp6:
-            self.start_dhcp6(c_channel_env)
-
-    def startup(self):
-        """
-            Start the BoB instance.
-
-            Returns None if successful, otherwise an string describing the
-            problem.
-        """
-        # Try to connect to the c-channel daemon, to see if it is already
-        # running
-        c_channel_env = {}
-        if self.msgq_socket_file is not None:
-             c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
-        logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
-        # try to connect, and if we can't wait a short while
-        try:
-            self.cc_session = isc.cc.Session(self.msgq_socket_file)
-            logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
-            return "b10-msgq already running, or socket file not cleaned , cannot start"
-        except isc.cc.session.SessionError:
-            # this is the case we want, where the msgq is not running
-            pass
-
-        # Start all processes.  If any one fails to start, kill all started
-        # processes and exit with an error indication.
-        try:
-            self.c_channel_env = c_channel_env
-            self.start_all_processes()
-        except Exception as e:
-            self.kill_started_processes()
-            return "Unable to start " + self.curproc + ": " + str(e)
-
-        # Started successfully
-        self.runnable = True
-        return None
-
-    def stop_all_processes(self):
-        """Stop all processes."""
-        cmd = { "command": ['shutdown']}
-
-        self.cc_session.group_sendmsg(cmd, 'Cmdctl', 'Cmdctl')
-        self.cc_session.group_sendmsg(cmd, "ConfigManager", "ConfigManager")
-        self.cc_session.group_sendmsg(cmd, "Auth", "Auth")
-        self.cc_session.group_sendmsg(cmd, "Resolver", "Resolver")
-        self.cc_session.group_sendmsg(cmd, "Xfrout", "Xfrout")
-        self.cc_session.group_sendmsg(cmd, "Xfrin", "Xfrin")
-        self.cc_session.group_sendmsg(cmd, "Zonemgr", "Zonemgr")
-        self.cc_session.group_sendmsg(cmd, "Stats", "Stats")
-        self.cc_session.group_sendmsg(cmd, "StatsHttpd", "StatsHttpd")
-
-    def stop_process(self, process, recipient):
-        """
-        Stop the given process, friendly-like. The process is the name it has
-        (in logs, etc), the recipient is the address on msgq.
-        """
-        logger.info(BIND10_STOP_PROCESS, process)
-        # TODO: Some timeout to solve processes that don't want to die would
-        # help. We can even store it in the dict, it is used only as a set
-        self.expected_shutdowns[process] = 1
-        # Ask the process to die willingly
-        self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
-            recipient)
-
-    # Series of stop_process wrappers
-    def stop_resolver(self):
-        self.stop_process('b10-resolver', 'Resolver')
-
-    def stop_auth(self):
-        self.stop_process('b10-auth', 'Auth')
-
-    def stop_xfrout(self):
-        self.stop_process('b10-xfrout', 'Xfrout')
-
-    def stop_xfrin(self):
-        self.stop_process('b10-xfrin', 'Xfrin')
-
-    def stop_zonemgr(self):
-        self.stop_process('b10-zonemgr', 'Zonemgr')
-
-    def shutdown(self):
-        """Stop the BoB instance."""
-        logger.info(BIND10_SHUTDOWN)
-        # first try using the BIND 10 request to stop
-        try:
-            self.stop_all_processes()
-        except:
-            pass
-        # XXX: some delay probably useful... how much is uncertain
-        # I have changed the delay from 0.5 to 1, but sometime it's 
-        # still not enough.
-        time.sleep(1)  
-        self.reap_children()
-        # next try sending a SIGTERM
-        processes_to_stop = list(self.processes.values())
-        for proc_info in processes_to_stop:
-            logger.info(BIND10_SEND_SIGTERM, proc_info.name,
-                        proc_info.pid)
-            try:
-                proc_info.process.terminate()
-            except OSError:
-                # ignore these (usually ESRCH because the child
-                # finally exited)
-                pass
-        # finally, send SIGKILL (unmaskable termination) until everybody dies
-        while self.processes:
-            # XXX: some delay probably useful... how much is uncertain
-            time.sleep(0.1)  
-            self.reap_children()
-            processes_to_stop = list(self.processes.values())
-            for proc_info in processes_to_stop:
-                logger.info(BIND10_SEND_SIGKILL, proc_info.name,
-                            proc_info.pid)
-                try:
-                    proc_info.process.kill()
-                except OSError:
-                    # ignore these (usually ESRCH because the child
-                    # finally exited)
-                    pass
-        logger.info(BIND10_SHUTDOWN_COMPLETE)
-
-    def _get_process_exit_status(self):
-        return os.waitpid(-1, os.WNOHANG)
-
-    def reap_children(self):
-        """Check to see if any of our child processes have exited, 
-        and note this for later handling. 
-        """
-        while True:
-            try:
-                (pid, exit_status) = self._get_process_exit_status()
-            except OSError as o:
-                if o.errno == errno.ECHILD: break
-                # XXX: should be impossible to get any other error here
-                raise
-            if pid == 0: break
-            if pid in self.processes:
-                # One of the processes we know about.  Get information on it.
-                proc_info = self.processes.pop(pid)
-                proc_info.restart_schedule.set_run_stop_time()
-                self.dead_processes[proc_info.pid] = proc_info
-
-                # Write out message, but only if in the running state:
-                # During startup and shutdown, these messages are handled
-                # elsewhere.
-                if self.runnable:
-                    if exit_status is None:
-                        logger.warn(BIND10_PROCESS_ENDED_NO_EXIT_STATUS,
-                                    proc_info.name, proc_info.pid)
-                    else:
-                        logger.warn(BIND10_PROCESS_ENDED_WITH_EXIT_STATUS,
-                                    proc_info.name, proc_info.pid,
-                                    exit_status)
-
-                    # Was it a special process?
-                    if proc_info.name == "b10-msgq":
-                        logger.fatal(BIND10_MSGQ_DAEMON_ENDED)
-                        self.runnable = False
-
-                # If we're in 'brittle' mode, we want to shutdown after
-                # any process dies.
-                if self.brittle:
-                    self.runnable = False
-            else:
-                logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
-
-    def restart_processes(self):
-        """
-            Restart any dead processes:
-
-            * Returns the time when the next process is ready to be restarted. 
-            * If the server is shutting down, returns 0.
-            * If there are no processes, returns None.
-
-            The values returned can be safely passed into select() as the 
-            timeout value.
-        """
-        next_restart = None
-        # if we're shutting down, then don't restart
-        if not self.runnable:
-            return 0
-        # otherwise look through each dead process and try to restart
-        still_dead = {}
-        now = time.time()
-        for proc_info in self.dead_processes.values():
-            if proc_info.name in self.expected_shutdowns:
-                # We don't restart, we wanted it to die
-                del self.expected_shutdowns[proc_info.name]
-                continue
-            restart_time = proc_info.restart_schedule.get_restart_time(now)
-            if restart_time > now:
-                if (next_restart is None) or (next_restart > restart_time):
-                    next_restart = restart_time
-                still_dead[proc_info.pid] = proc_info
-            else:
-                logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
-                try:
-                    proc_info.respawn()
-                    self.processes[proc_info.pid] = proc_info
-                    logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
-                except:
-                    still_dead[proc_info.pid] = proc_info
-        # remember any processes that refuse to be resurrected
-        self.dead_processes = still_dead
-        # return the time when the next process is ready to be restarted
-        return next_restart
-
-# global variables, needed for signal handlers
-options = None
-boss_of_bind = None
-
-def reaper(signal_number, stack_frame):
-    """A child process has died (SIGCHLD received)."""
-    # don't do anything... 
-    # the Python signal handler has been set up to write
-    # down a pipe, waking up our select() bit
-    pass
-
-def get_signame(signal_number):
-    """Return the symbolic name for a signal."""
-    for sig in dir(signal):
-        if sig.startswith("SIG") and sig[3].isalnum():
-            if getattr(signal, sig) == signal_number:
-                return sig
-    return "Unknown signal %d" % signal_number
-
-# XXX: perhaps register atexit() function and invoke that instead
-def fatal_signal(signal_number, stack_frame):
-    """We need to exit (SIGINT or SIGTERM received)."""
-    global options
-    global boss_of_bind
-    logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
-    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    boss_of_bind.runnable = False
-
-def process_rename(option, opt_str, value, parser):
-    """Function that renames the process if it is requested by a option."""
-    isc.util.process.rename(value)
-
-def parse_args(args=sys.argv[1:], Parser=OptionParser):
-    """
-    Function for parsing command line arguments. Returns the
-    options object from OptionParser.
-    """
-    parser = Parser(version=VERSION)
-    parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
-                      type="string", default=None,
-                      help="UNIX domain socket file the b10-msgq daemon will use")
-    parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
-                      default=False, help="disable hot-spot cache in authoritative DNS server")
-    parser.add_option("-u", "--user", dest="user", type="string", default=None,
-                      help="Change user after startup (must run as root)")
-    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
-                      help="display more about what is going on")
-    parser.add_option("--pretty-name", type="string", action="callback",
-                      callback=process_rename,
-                      help="Set the process name (displayed in ps, top, ...)")
-    parser.add_option("-c", "--config-file", action="store",
-                      dest="config_file", default=None,
-                      help="Configuration database filename")
-    parser.add_option("-p", "--data-path", dest="data_path",
-                      help="Directory to search for configuration files",
-                      default=None)
-    parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
-                      default=None, help="Port of command control")
-    parser.add_option("--pid-file", dest="pid_file", type="string",
-                      default=None,
-                      help="file to dump the PID of the BIND 10 process")
-    parser.add_option("--brittle", dest="brittle", action="store_true",
-                      help="debugging flag: exit if any component dies")
-
-    (options, args) = parser.parse_args(args)
-
-    if options.cmdctl_port is not None:
-        try:
-            isc.net.parse.port_parse(options.cmdctl_port)
-        except ValueError as e:
-            parser.error(e)
-
-    if args:
-        parser.print_help()
-        sys.exit(1)
-
-    return options
-
-def dump_pid(pid_file):
-    """
-    Dump the PID of the current process to the specified file.  If the given
-    file is None this function does nothing.  If the file already exists,
-    the existing content will be removed.  If a system error happens in
-    creating or writing to the file, the corresponding exception will be
-    propagated to the caller.
-    """
-    if pid_file is None:
-        return
-    f = open(pid_file, "w")
-    f.write('%d\n' % os.getpid())
-    f.close()
-
-def unlink_pid_file(pid_file):
-    """
-    Remove the given file, which is basically expected to be the PID file
-    created by dump_pid().  The specified may or may not exist; if it
-    doesn't this function does nothing.  Other system level errors in removing
-    the file will be propagated as the corresponding exception.
-    """
-    if pid_file is None:
-        return
-    try:
-        os.unlink(pid_file)
-    except OSError as error:
-        if error.errno is not errno.ENOENT:
-            raise
-
-
-def main():
-    global options
-    global boss_of_bind
-    # Enforce line buffering on stdout, even when not a TTY
-    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)
-
-    options = parse_args()
-
-    # Check user ID.
-    setuid = None
-    username = None
-    if options.user:
-        # Try getting information about the user, assuming UID passed.
-        try:
-            pw_ent = pwd.getpwuid(int(options.user))
-            setuid = pw_ent.pw_uid
-            username = pw_ent.pw_name
-        except ValueError:
-            pass
-        except KeyError:
-            pass
-
-        # Next try getting information about the user, assuming user name 
-        # passed.
-        # If the information is both a valid user name and user number, we
-        # prefer the name because we try it second. A minor point, hopefully.
-        try:
-            pw_ent = pwd.getpwnam(options.user)
-            setuid = pw_ent.pw_uid
-            username = pw_ent.pw_name
-        except KeyError:
-            pass
-
-        if setuid is None:
-            logger.fatal(BIND10_INVALID_USER, options.user)
-            sys.exit(1)
-
-    # Announce startup.
-    logger.info(BIND10_STARTING, VERSION)
-
-    # Create wakeup pipe for signal handlers
-    wakeup_pipe = os.pipe()
-    signal.set_wakeup_fd(wakeup_pipe[1])
-
-    # Set signal handlers for catching child termination, as well
-    # as our own demise.
-    signal.signal(signal.SIGCHLD, reaper)
-    signal.siginterrupt(signal.SIGCHLD, False)
-    signal.signal(signal.SIGINT, fatal_signal)
-    signal.signal(signal.SIGTERM, fatal_signal)
-
-    # Block SIGPIPE, as we don't want it to end this process
-    signal.signal(signal.SIGPIPE, signal.SIG_IGN)
-
-    # Go bob!
-    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
-                       options.config_file, options.nocache, options.verbose,
-                       setuid, username, options.cmdctl_port, options.brittle)
-    startup_result = boss_of_bind.startup()
-    if startup_result:
-        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
-        sys.exit(1)
-    logger.info(BIND10_STARTUP_COMPLETE)
-    dump_pid(options.pid_file)
-
-    # In our main loop, we check for dead processes or messages 
-    # on the c-channel.
-    wakeup_fd = wakeup_pipe[0]
-    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
-    while boss_of_bind.runnable:
-        # clean up any processes that exited
-        boss_of_bind.reap_children()
-        next_restart = boss_of_bind.restart_processes()
-        if next_restart is None:
-            wait_time = None
-        else:
-            wait_time = max(next_restart - time.time(), 0)
-
-        # select() can raise EINTR when a signal arrives, 
-        # even if they are resumable, so we have to catch
-        # the exception
-        try:
-            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [], 
-                                                  wait_time)
-        except select.error as err:
-            if err.args[0] == errno.EINTR:
-                (rlist, wlist, xlist) = ([], [], [])
-            else:
-                logger.fatal(BIND10_SELECT_ERROR, err)
-                break
-
-        for fd in rlist + xlist:
-            if fd == ccs_fd:
-                try:
-                    boss_of_bind.ccs.check_command()
-                except isc.cc.session.ProtocolError:
-                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
-                    self.runnable = False
-                    break
-            elif fd == wakeup_fd:
-                os.read(wakeup_fd, 32)
-
-    # shutdown
-    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    boss_of_bind.shutdown()
-    unlink_pid_file(options.pid_file)
-    sys.exit(0)
-
-if __name__ == "__main__":
-    main()
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 1128264..6705760 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -2,7 +2,7 @@
                "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
 	       [<!ENTITY mdash "—">]>
 <!--
- - Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+ - Copyright (C) 2010-2011  Internet Systems Consortium, Inc. ("ISC")
  -
  - Permission to use, copy, modify, and/or distribute this software for any
  - purpose with or without fee is hereby granted, provided that the above
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>March 31, 2011</date>
+    <date>November 23, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -50,7 +50,7 @@
       <arg><option>-p <replaceable>data_path</replaceable></option></arg>
       <arg><option>-u <replaceable>user</replaceable></option></arg>
       <arg><option>-v</option></arg>
-      <arg><option>--brittle</option></arg>
+      <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
       <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
       <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
       <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
@@ -60,6 +60,7 @@
       <arg><option>--pretty-name <replaceable>name</replaceable></option></arg>
       <arg><option>--user <replaceable>user</replaceable></option></arg>
       <arg><option>--verbose</option></arg>
+      <arg><option>--wait <replaceable>wait_time</replaceable></option></arg>
     </cmdsynopsis>
   </refsynopsisdiv>
 
@@ -90,20 +91,6 @@
 
       <varlistentry>
         <term>
-          <option>--brittle</option>
-        </term>
-        <listitem>
-          <para>
-	    Shutdown if any of the child processes of
-	    <command>bind10</command> exit.  This is intended to
-	    help developers debug the server, and should not be
-	    used in production.
-          </para>
-        </listitem>
-      </varlistentry>
-
-      <varlistentry>
-        <term>
           <option>-c</option> <replaceable>config-filename</replaceable>,
           <option>--config-file</option> <replaceable>config-filename</replaceable>
         </term>
@@ -211,12 +198,246 @@ The default is the basename of ARG 0.
         </listitem>
       </varlistentry>
 
+      <varlistentry>
+        <term><option>-w</option> <replaceable>wait_time</replaceable>, <option>--wait</option> <replaceable>wait_time</replaceable></term>
+        <listitem>
+          <para>Sets the amount of time that BIND 10 will wait for
+          the configuration manager (a key component of BIND 10) to
+          initialize itself before abandoning the start up and
+          terminating with an error.  The wait_time is specified in
+          seconds and has a default value of 10.
+          </para>
+        </listitem>
+      </varlistentry>
+
     </variablelist>
   </refsect1>
 
 <!--
 TODO: configuration section
 -->
+
+  <refsect1>
+    <title>CONFIGURATION AND COMMANDS</title>
+
+    <para>
+      The configuration provides settings for components for
+      <command>bind10</command> to manage under
+      <varname>/Boss/components/</varname>.
+      The default elements are:
+    </para>
+
+    <itemizedlist>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-auth</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/setuid</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrin</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrout</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
+      </listitem>
+
+    </itemizedlist>
+
+    <para>
+      (Note that the startup of <command>b10-sockcreator</command>,
+      <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+      is not configurable. It is hardcoded and <command>bind10</command>
+      will not run without them.)
+    </para>
+
+    <para>
+      These named sets (listed above) contain the following settings:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term><varname>address</varname></term>
+        <listitem>
+	  <para>The name used for communicating to it on the message
+	  bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><varname>kind</varname></term>
+        <listitem>
+          <para>
+            This defines how required a component is.
+            The possible settings for <varname>kind</varname> are:
+            <varname>core</varname> (system won't start if it won't
+            start and <command>bind10</command> will shutdown if
+            a <quote>core</quote> component crashes),
+            <varname>dispensable</varname> (<command>bind10</command>
+            will restart failing component),
+            and
+	    <varname>needed</varname> (<command>bind10</command>
+	    will shutdown if the component fails to start initially,
+	    but if it crashes later, a restart will be attempted).
+            This setting is required.
+<!-- TODO: formatting -->
+          </para>
+        </listitem>
+      </varlistentry>
+
+<!--
+TODO: currently not used
+      <varlistentry>
+        <term> <varname>params</varname> </term>
+        <listitem>
+          <para>
+list
+</para>
+        </listitem>
+      </varlistentry>
+-->
+
+      <varlistentry>
+        <term> <varname>priority</varname> </term>
+        <listitem>
+          <para>This is an integer. <command>bind10</command>
+            will start the components with largest priority numbers first.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>process</varname> </term>
+        <listitem>
+          <para>This is the filename of the executable to be started.
+            If not defined, then <command>bind10</command> will
+            use the component name instead.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>special</varname> </term>
+        <listitem>
+          <para>
+            This defines if the component is started a special
+            way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+setuid
+sockcreator
+xfrin
+-->
+
+</para>
+        </listitem>
+      </varlistentry>
+
+    </variablelist>
+
+<!-- TODO: formatting -->
+    <para>
+      The <varname>Boss</varname> configuration commands are:
+    </para>
+<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
+
+    <para>
+      <command>getstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon.
+      This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+<!-- TODO: explain difference with sendstat -->
+    </para>
+
+    <para>
+      <command>ping</command> is used to check the connection with the
+      <command>bind10</command> daemon.
+      It returns the text <quote>pong</quote>.
+    </para>
+
+    <para>
+      <command>sendstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon immediately.
+<!-- TODO: compare with internal command getstats? -->
+    </para>
+
+    <para>
+      <command>show_processes</command> lists the current processes
+      managed by <command>bind10</command>.
+      The output is an array in JSON format containing the process
+      ID and the name for each.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+    </para>
+
+    <para>
+      <command>shutdown</command> tells <command>bind10</command>
+      to shutdown the BIND 10 servers.
+      It will tell each process it manages to shutdown and, when
+      complete, <command>bind10</command> will exit.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
+    <title>STATISTICS DATA</title>
+
+    <para>
+      The statistics data collected by the <command>b10-stats</command>
+      daemon include:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term>bind10.boot_time</term>
+        <listitem><para>
+          The date and time that the <command>bind10</command>
+          process started.
+          This is represented in ISO 8601 format.
+        </para></listitem>
+      </varlistentry>
+
+    </variablelist>
+
+  </refsect1>
+
 <!--
   <refsect1>
     <title>FILES</title>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index 3f5f637..d850e47 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -20,54 +20,100 @@ The boss process is starting up and will now check if the message bus
 daemon is already running. If so, it will not be able to start, as it
 needs a dedicated message bus.
 
-% BIND10_CONFIGURATION_START_AUTH start authoritative server: %1
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-
-% BIND10_CONFIGURATION_START_RESOLVER start resolver: %1
-This message shows whether or not the resolver should be
-started according to the configuration.
+% BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
+
+% BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+
+% BIND10_COMPONENT_RESTART component %1 is about to restart
+The named component failed previously and we will try to restart it to provide
+as flawless service as possible, but it should be investigated what happened,
+as it could happen again.
+
+% BIND10_COMPONENT_START component %1 is starting
+The named component is about to be started by the boss process.
+
+% BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further actions
+will be taken about it.
+
+% BIND10_COMPONENT_STOP component %1 is being stopped
+A component is about to be asked to stop willingly by the boss.
+
+% BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed
+A component failed for some reason (see previous messages). It is either a core
+component or needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+
+% BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'
+A debug message. This indicates that the configurator is building a plan
+how to change configuration from the older one to newer one. This does no
+real work yet, it just does the planning what needs to be done.
+
+% BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+
+% BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+
+% BIND10_CONFIGURATOR_RUN running plan of %1 tasks
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+
+% BIND10_CONFIGURATOR_START bind10 component configurator is starting up
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones boss
+needs to read the configuration), the rest will be started after the
+configuration is known.
+
+% BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate by their own, if they fail to comply, other parts of
+the boss process will try to force them).
+
+% BIND10_CONFIGURATOR_TASK performing task %1 on %2
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
 
 % BIND10_INVALID_USER invalid user: %1
 The boss process was started with the -u option, to drop root privileges
 and continue running as the specified user, but the user is unknown.
 
+% BIND10_KILLING_ALL_PROCESSES killing all started processes
+The boss module was not able to start every process it needed to start
+during startup, and will now kill the processes that did get started.
+
 % BIND10_KILL_PROCESS killing process %1
 The boss module is sending a kill signal to process with the given name,
 as part of the process of killing all started processes during a failed
 startup, as described for BIND10_KILLING_ALL_PROCESSES
 
-% BIND10_KILLING_ALL_PROCESSES killing all started processes
-The boss module was not able to start every process it needed to start
-during startup, and will now kill the processes that did get started.
-
 % BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
 There already appears to be a message bus daemon running. Either an
 old process was not shut down correctly, and needs to be killed, or
 another instance of BIND10, with the same msgq domain socket, is
 running, which needs to be stopped.
 
-% BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-
 % BIND10_MSGQ_DISAPPEARED msgq channel disappeared
 While listening on the message bus channel for messages, it suddenly
 disappeared. The msgq daemon may have died. This might lead to an
 inconsistent state of the system, and BIND 10 will now shut down.
 
-% BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-
-% BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+% BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
+This indicates a process started previously terminated. The process id
+and component owning the process are indicated, as well as the exit code.
+This doesn't distinguish if the process was supposed to terminate or not.
 
 % BIND10_READING_BOSS_CONFIGURATION reading boss configuration
 The boss process is starting up, and will now process the initial
@@ -103,6 +149,9 @@ The boss module is sending a SIGKILL signal to the given process.
 % BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)
 The boss module is sending a SIGTERM signal to the given process.
 
+% BIND10_SETUID setting UID to %1
+The boss switches the user it runs as to the given UID.
+
 % BIND10_SHUTDOWN stopping the server
 The boss process received a command or signal telling it to shut down.
 It will send a shutdown command to each process. The processes that do
@@ -113,12 +162,48 @@ it shall send SIGKILL signals to the processes still alive.
 All child processes have been stopped, and the boss process will now
 stop itself.
 
-% BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.
-The given module is being started or restarted without root privileges.
-If the module needs these privileges, it may have problems starting.
-Note that this issue should be resolved by the pending 'socket-creator'
-process; once that has been implemented, modules should not need root
-privileges anymore. See tickets #800 and #801 for more information.
+% BIND10_SOCKCREATOR_BAD_CAUSE unknown error cause from socket creator: %1
+The socket creator reported an error when creating a socket. But the function
+which failed is unknown (not one of 'S' for socket or 'B' for bind).
+
+% BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1
+The boss requested a socket from the creator, but the answer is unknown. This
+looks like a programmer error.
+
+% BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator
+There should be more data from the socket creator, but it closed the socket.
+It probably crashed.
+
+% BIND10_SOCKCREATOR_INIT initializing socket creator parser
+The boss module initializes routines for parsing the socket creator
+protocol.
+
+% BIND10_SOCKCREATOR_KILL killing the socket creator
+The socket creator is being terminated the aggressive way, by sending it
+sigkill. This should not happen usually.
+
+% BIND10_SOCKCREATOR_TERMINATE terminating socket creator
+The boss module sends a request to terminate to the socket creator.
+
+% BIND10_SOCKCREATOR_TRANSPORT_ERROR transport error when talking to the socket creator: %1
+Either sending or receiving data from the socket creator failed with the given
+error. The creator probably crashed or some serious OS-level problem happened,
+as the communication happens only on local host.
+
+% BIND10_SOCKET_CREATED successfully created socket %1
+The socket creator successfully created and sent a requested socket, it has
+the given file number.
+
+% BIND10_SOCKET_ERROR error on %1 call in the creator: %2/%3
+The socket creator failed to create the requested socket. It failed on the
+indicated OS API function with given error.
+
+% BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator
+The boss forwards a request for a socket to the socket creator.
+
+% BIND10_STARTED_CC started configuration/command session
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
 
 % BIND10_STARTED_PROCESS started %1
 The given process has successfully been started.
@@ -129,6 +214,10 @@ The given process has successfully been started, and has the given PID.
 % BIND10_STARTING starting BIND10: %1
 Informational message on startup that shows the full version.
 
+% BIND10_STARTING_CC starting configuration/command session
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+
 % BIND10_STARTING_PROCESS starting process %1
 The boss module is starting the given process.
 
@@ -147,6 +236,32 @@ All modules have been successfully started, and BIND 10 is now running.
 There was a fatal error when BIND10 was trying to start. The error is
 shown, and BIND10 will now shut down.
 
+% BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+
+% BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+
+% BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
+% BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.
+The resolver is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+
 % BIND10_STOP_PROCESS asking %1 to shut down
 The boss module is sending a shutdown command to the given module over
 the message channel.
@@ -155,3 +270,13 @@ the message channel.
 An unknown child process has exited. The PID is printed, but no further
 action will be taken by the boss process.
 
+% BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
new file mode 100755
index 0000000..f9816fb
--- /dev/null
+++ b/src/bin/bind10/bind10_src.py.in
@@ -0,0 +1,990 @@
+#!@PYTHON@
+
+# Copyright (C) 2010,2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This file implements the Boss of Bind (BoB, or bob) program.
+
+Its purpose is to start up the BIND 10 system, and then manage the
+processes, by starting and stopping processes, plus restarting
+processes that exit.
+
+To start the system, it first runs the c-channel program (msgq), then
+connects to that. It then runs the configuration manager, and reads
+its own configuration. Then it proceeds to starting other modules.
+
+The Python subprocess module is used for starting processes, but
+because this is not efficient for managing groups of processes,
+SIGCHLD signals are caught and processed using the signal module.
+
+Most of the logic is contained in the BoB class. However, since Python
+requires that signal processing happen in the main thread, we do
+signal handling outside of that class, in the code running for
+__main__.
+"""
+
+import sys; sys.path.append ('@@PYTHONPATH@@')
+import os
+
+# If B10_FROM_SOURCE is set in the environment, we use data files
+# from a directory relative to that, otherwise we use the ones
+# installed on the system
+if "B10_FROM_SOURCE" in os.environ:
+    SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + "/src/bin/bind10/bob.spec"
+    ADD_LIBEXEC_PATH = False
+else:
+    PREFIX = "@prefix@"
+    DATAROOTDIR = "@datarootdir@"
+    SPECFILE_LOCATION = "@datadir@/@PACKAGE@/bob.spec".replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
+    ADD_LIBEXEC_PATH = True
+    
+import subprocess
+import signal
+import re
+import errno
+import time
+import select
+import random
+import socket
+from optparse import OptionParser, OptionValueError
+import io
+import pwd
+import posix
+import copy
+
+import isc.cc
+import isc.util.process
+import isc.net.parse
+import isc.log
+from isc.log_messages.bind10_messages import *
+import isc.bind10.component
+import isc.bind10.special_component
+
+isc.log.init("b10-boss")
+logger = isc.log.Logger("boss")
+
+# Pending system-wide debug level definitions, the ones we
+# use here are hardcoded for now
+DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
+DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
+
+# Assign this process some longer name
+isc.util.process.rename(sys.argv[0])
+
+# This is the version that gets displayed to the user.
+# The VERSION string consists of the module name, the module version
+# number, and the overall BIND 10 version number (set in configure.ac).
+VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
+
+# This is for boot_time of Boss
+_BASETIME = time.gmtime()
+
+class ProcessInfoError(Exception): pass
+
+class ProcessInfo:
+    """Information about a process"""
+
+    dev_null = open(os.devnull, "w")
+
+    def __init__(self, name, args, env={}, dev_null_stdout=False,
+                 dev_null_stderr=False, uid=None, username=None):
+        self.name = name 
+        self.args = args
+        self.env = env
+        self.dev_null_stdout = dev_null_stdout
+        self.dev_null_stderr = dev_null_stderr
+        self.uid = uid
+        self.username = username
+        self.process = None
+        self.pid = None
+
+    def _preexec_work(self):
+        """Function used before running a program that needs to run as a
+        different user."""
+        # First, put us into a separate process group so we don't get
+        # SIGINT signals on Ctrl-C (the boss will shut everything down by
+        # other means).
+        os.setpgrp()
+        # Second, set the user ID if one has been specified
+        if self.uid is not None:
+            try:
+                posix.setuid(self.uid)
+            except OSError as e:
+                if e.errno == errno.EPERM:
+                    # if we failed to change user due to permission report that
+                    raise ProcessInfoError("Unable to change to user %s (uid %d)" % (self.username, self.uid))
+                else:
+                    # otherwise simply re-raise whatever error we found
+                    raise
+
+    def _spawn(self):
+        if self.dev_null_stdout:
+            spawn_stdout = self.dev_null
+        else:
+            spawn_stdout = None
+        if self.dev_null_stderr:
+            spawn_stderr = self.dev_null
+        else:
+            spawn_stderr = None
+        # Environment variables for the child process will be a copy of those
+        # of the boss process with any additional specific variables given
+        # on construction (self.env).
+        spawn_env = copy.deepcopy(os.environ)
+        spawn_env.update(self.env)
+        if ADD_LIBEXEC_PATH:
+            spawn_env['PATH'] = "@@LIBEXECDIR@@:" + spawn_env['PATH']
+        self.process = subprocess.Popen(self.args,
+                                        stdin=subprocess.PIPE,
+                                        stdout=spawn_stdout,
+                                        stderr=spawn_stderr,
+                                        close_fds=True,
+                                        env=spawn_env,
+                                        preexec_fn=self._preexec_work)
+        self.pid = self.process.pid
+
+    # spawn() and respawn() are the same for now, but in the future they
+    # may have different functionality
+    def spawn(self):
+        self._spawn()
+
+    def respawn(self):
+        self._spawn()
+
+class CChannelConnectError(Exception): pass
+
+class ProcessStartError(Exception): pass
+
+class BoB:
+    """Boss of BIND class."""
+    
+    def __init__(self, msgq_socket_file=None, data_path=None,
+    config_filename=None, nocache=False, verbose=False, setuid=None,
+    username=None, cmdctl_port=None, wait_time=10):
+        """
+            Initialize the Boss of BIND. This is a singleton (only one can run).
+        
+            The msgq_socket_file specifies the UNIX domain socket file that the
+            msgq process listens on.  If verbose is True, then the boss reports
+            what it is doing.
+
+            Data path and config filename are passed through to config manager
+            (if provided) and specify the config file to be used.
+
+            The cmdctl_port is passed to cmdctl and specify on which port it
+            should listen.
+
+            wait_time controls the amount of time (in seconds) that Boss waits
+            for selected processes to initialize before continuing with the
+            initialization.  Currently this is only the configuration manager.
+        """
+        self.cc_session = None
+        self.ccs = None
+        self.curproc = None
+        self.msgq_socket_file = msgq_socket_file
+        self.nocache = nocache
+        self.component_config = {}
+        # Some time in future, it may happen that a single component has
+        # multple processes. If so happens, name "components" may be
+        # inapropriate. But as the code isn't probably completely ready
+        # for it, we leave it at components for now.
+        self.components = {}
+        # Simply list of components that died and need to wait for a
+        # restart. Components manage their own restart schedule now
+        self.components_to_restart = []
+        self.runnable = False
+        self.uid = setuid
+        self.username = username
+        self.verbose = verbose
+        self.data_path = data_path
+        self.config_filename = config_filename
+        self.cmdctl_port = cmdctl_port
+        self.wait_time = wait_time
+        self._component_configurator = isc.bind10.component.Configurator(self,
+            isc.bind10.special_component.get_specials())
+        # The priorities here make them start in the correct order. First
+        # the socket creator (which would drop root privileges by then),
+        # then message queue and after that the config manager (which uses
+        # the message queue)
+        self.__core_components = {
+            'sockcreator': {
+                'kind': 'core',
+                'special': 'sockcreator',
+                'priority': 200
+            },
+            'msgq': {
+                'kind': 'core',
+                'special': 'msgq',
+                'priority': 199
+            },
+            'cfgmgr': {
+                'kind': 'core',
+                'special': 'cfgmgr',
+                'priority': 198
+            }
+        }
+        self.__started = False
+        self.exitcode = 0
+
+        # If -v was set, enable full debug logging.
+        if self.verbose:
+            logger.set_severity("DEBUG", 99)
+
+    def __propagate_component_config(self, config):
+        comps = dict(config)
+        # Fill in the core components, so they stay alive
+        for comp in self.__core_components:
+            if comp in comps:
+                raise Exception(comp + " is core component managed by " +
+                                "bind10 boss, do not set it")
+            comps[comp] = self.__core_components[comp]
+        # Update the configuration
+        self._component_configurator.reconfigure(comps)
+
+    def config_handler(self, new_config):
+        # If this is initial update, don't do anything now, leave it to startup
+        if not self.runnable:
+            return
+        logger.debug(DBG_COMMANDS, BIND10_RECEIVED_NEW_CONFIGURATION,
+                     new_config)
+        try:
+            if 'components' in new_config:
+                self.__propagate_component_config(new_config['components'])
+            return isc.config.ccsession.create_answer(0)
+        except Exception as e:
+            return isc.config.ccsession.create_answer(1, str(e))
+
+    def get_processes(self):
+        pids = list(self.components.keys())
+        pids.sort()
+        process_list = [ ]
+        for pid in pids:
+            process_list.append([pid, self.components[pid].name()])
+        return process_list
+
+    def _get_stats_data(self):
+        return { "owner": "Boss",
+                 "data": { 'boot_time':
+                               time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+                           }
+                 }
+
+    def command_handler(self, command, args):
+        logger.debug(DBG_COMMANDS, BIND10_RECEIVED_COMMAND, command)
+        answer = isc.config.ccsession.create_answer(1, "command not implemented")
+        if type(command) != str:
+            answer = isc.config.ccsession.create_answer(1, "bad command")
+        else:
+            if command == "shutdown":
+                self.runnable = False
+                answer = isc.config.ccsession.create_answer(0)
+            elif command == "getstats":
+                answer = isc.config.ccsession.create_answer(0, self._get_stats_data())
+            elif command == "sendstats":
+                # send statistics data to the stats daemon immediately
+                stats_data = self._get_stats_data()
+                valid = self.ccs.get_module_spec().validate_statistics(
+                    True, stats_data["data"])
+                if valid:
+                    cmd = isc.config.ccsession.create_command('set', stats_data)
+                    seq = self.cc_session.group_sendmsg(cmd, 'Stats')
+                    # Consume the answer, in case it becomes an orphan message.
+                    try:
+                        self.cc_session.group_recvmsg(False, seq)
+                    except isc.cc.session.SessionTimeout:
+                        pass
+                    answer = isc.config.ccsession.create_answer(0)
+                else:
+                    logger.fatal(BIND10_INVALID_STATISTICS_DATA);
+                    answer = isc.config.ccsession.create_answer(
+                        1, "specified statistics data is invalid")
+            elif command == "ping":
+                answer = isc.config.ccsession.create_answer(0, "pong")
+            elif command == "show_processes":
+                answer = isc.config.ccsession. \
+                    create_answer(0, self.get_processes())
+            else:
+                answer = isc.config.ccsession.create_answer(1,
+                                                            "Unknown command")
+        return answer
+
+    def kill_started_components(self):
+        """
+            Called as part of the exception handling when a process fails to
+            start, this runs through the list of started processes, killing
+            each one.  It then clears that list.
+        """
+        logger.info(BIND10_KILLING_ALL_PROCESSES)
+
+        for pid in self.components:
+            logger.info(BIND10_KILL_PROCESS, self.components[pid].name())
+            self.components[pid].kill(True)
+        self.components = {}
+
+    def _read_bind10_config(self):
+        """
+            Reads the parameters associated with the BoB module itself.
+
+            This means the list of components we should start now.
+
+            This could easily be combined into start_all_processes, but
+            it stays because of historical reasons and because the tests
+            replace the method sometimes.
+        """
+        logger.info(BIND10_READING_BOSS_CONFIGURATION)
+
+        config_data = self.ccs.get_full_config()
+        self.__propagate_component_config(config_data['components'])
+
+    def log_starting(self, process, port = None, address = None):
+        """
+            A convenience function to output a "Starting xxx" message if the
+            logging is set to DEBUG with debuglevel DBG_PROCESS or higher.
+            Putting this into a separate method ensures
+            that the output form is consistent across all processes.
+
+            The process name (passed as the first argument) is put into
+            self.curproc, and is used to indicate which process failed to
+            start if there is an error (and is used in the "Started" message
+            on success).  The optional port and address information are
+            appended to the message (if present).
+        """
+        self.curproc = process
+        if port is None and address is None:
+            logger.info(BIND10_STARTING_PROCESS, self.curproc)
+        elif address is None:
+            logger.info(BIND10_STARTING_PROCESS_PORT, self.curproc,
+                        port)
+        else:
+            logger.info(BIND10_STARTING_PROCESS_PORT_ADDRESS,
+                        self.curproc, address, port)
+
+    def log_started(self, pid = None):
+        """
+            A convenience function to output a 'Started xxxx (PID yyyy)'
+            message.  As with starting_message(), this ensures a consistent
+            format.
+        """
+        if pid is None:
+            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS, self.curproc)
+        else:
+            logger.debug(DBG_PROCESS, BIND10_STARTED_PROCESS_PID, self.curproc, pid)
+
+    def process_running(self, msg, who):
+        """
+            Some processes return a message to the Boss after they have
+            started to indicate that they are running.  The form of the
+            message is a dictionary with contents {"running": "<process>"}.
+            This method checks the passed message and returns True if the
+            "who" process is contained in the message (so is presumably
+            running).  It returns False for all other conditions and will
+            log an error if appropriate.
+        """
+        if msg is not None:
+            try:
+                if msg["running"] == who:
+                    return True
+                else:
+                    logger.error(BIND10_STARTUP_UNEXPECTED_MESSAGE, msg)
+            except:
+                logger.error(BIND10_STARTUP_UNRECOGNISED_MESSAGE, msg)
+        
+        return False
+
+    # The next few methods start the individual processes of BIND-10.  They
+    # are called via start_all_processes().  If any fail, an exception is
+    # raised which is caught by the caller of start_all_processes(); this kills
+    # processes started up to that point before terminating the program.
+
+    def start_msgq(self):
+        """
+            Start the message queue and connect to the command channel.
+        """
+        self.log_starting("b10-msgq")
+        msgq_proc = ProcessInfo("b10-msgq", ["b10-msgq"], self.c_channel_env,
+                                True, not self.verbose, uid=self.uid,
+                                username=self.username)
+        msgq_proc.spawn()
+        self.log_started(msgq_proc.pid)
+
+        # Now connect to the c-channel
+        cc_connect_start = time.time()
+        while self.cc_session is None:
+            # if we have been trying for "a while" give up
+            if (time.time() - cc_connect_start) > 5:
+                raise CChannelConnectError("Unable to connect to c-channel after 5 seconds")
+
+            # try to connect, and if we can't wait a short while
+            try:
+                self.cc_session = isc.cc.Session(self.msgq_socket_file)
+            except isc.cc.session.SessionError:
+                time.sleep(0.1)
+
+        # Subscribe to the message queue.  The only messages we expect to receive
+        # on this channel are ones relating to process startup.
+        self.cc_session.group_subscribe("Boss")
+
+        return msgq_proc
+
+    def start_cfgmgr(self):
+        """
+            Starts the configuration manager process
+        """
+        self.log_starting("b10-cfgmgr")
+        args = ["b10-cfgmgr"]
+        if self.data_path is not None:
+            args.append("--data-path=" + self.data_path)
+        if self.config_filename is not None:
+            args.append("--config-filename=" + self.config_filename)
+        bind_cfgd = ProcessInfo("b10-cfgmgr", args,
+                                self.c_channel_env, uid=self.uid,
+                                username=self.username)
+        bind_cfgd.spawn()
+        self.log_started(bind_cfgd.pid)
+
+        # Wait for the configuration manager to start up as subsequent initialization
+        # cannot proceed without it.  The time to wait can be set on the command line.
+        time_remaining = self.wait_time
+        msg, env = self.cc_session.group_recvmsg()
+        while time_remaining > 0 and not self.process_running(msg, "ConfigManager"):
+            logger.debug(DBG_PROCESS, BIND10_WAIT_CFGMGR)
+            time.sleep(1)
+            time_remaining = time_remaining - 1
+            msg, env = self.cc_session.group_recvmsg()
+        
+        if not self.process_running(msg, "ConfigManager"):
+            raise ProcessStartError("Configuration manager process has not started")
+
+        return bind_cfgd
+
+    def start_ccsession(self, c_channel_env):
+        """
+            Start the CC Session
+
+            The argument c_channel_env is unused but is supplied to keep the
+            argument list the same for all start_xxx methods.
+
+            With regards to logging, note that as the CC session is not a
+            process, the log_starting/log_started methods are not used.
+        """
+        logger.info(BIND10_STARTING_CC)
+        self.ccs = isc.config.ModuleCCSession(SPECFILE_LOCATION, 
+                                      self.config_handler,
+                                      self.command_handler,
+                                      socket_file = self.msgq_socket_file)
+        self.ccs.start()
+        logger.debug(DBG_PROCESS, BIND10_STARTED_CC)
+
+    # A couple of utility methods for starting processes...
+
+    def start_process(self, name, args, c_channel_env, port=None, address=None):
+        """
+            Given a set of command arguments, start the process and output
+            appropriate log messages.  If the start is successful, the process
+            is added to the list of started processes.
+
+            The port and address arguments are for log messages only.
+        """
+        self.log_starting(name, port, address)
+        newproc = ProcessInfo(name, args, c_channel_env)
+        newproc.spawn()
+        self.log_started(newproc.pid)
+        return newproc
+
+    def register_process(self, pid, component):
+        """
+        Put another process into boss to watch over it.  When the process
+        dies, the component.failed() is called with the exit code.
+
+        It is expected the info is a isc.bind10.component.BaseComponent
+        subclass (or anything having the same interface).
+        """
+        self.components[pid] = component
+
+    def start_simple(self, name):
+        """
+            Most of the BIND-10 processes are started with the command:
+
+                <process-name> [-v]
+
+            ... where -v is appended if verbose is enabled.  This method
+            generates the arguments from the name and starts the process.
+
+            The port and address arguments are for log messages only.
+        """
+        # Set up the command arguments.
+        args = [name]
+        if self.verbose:
+            args += ['-v']
+
+        # ... and start the process
+        return self.start_process(name, args, self.c_channel_env)
+
+    # The next few methods start up the rest of the BIND-10 processes.
+    # Although many of these methods are little more than a call to
+    # start_simple, they are retained (a) for testing reasons and (b) as a place
+    # where modifications can be made if the process start-up sequence changes
+    # for a given process.
+
+    def start_auth(self):
+        """
+            Start the Authoritative server
+        """
+        if self.uid is not None and self.__started:
+            logger.warn(BIND10_START_AS_NON_ROOT_AUTH)
+        authargs = ['b10-auth']
+        if self.nocache:
+            authargs += ['-n']
+        if self.uid:
+            authargs += ['-u', str(self.uid)]
+        if self.verbose:
+            authargs += ['-v']
+
+        # ... and start
+        return self.start_process("b10-auth", authargs, self.c_channel_env)
+
+    def start_resolver(self):
+        """
+            Start the Resolver.  At present, all these arguments and switches
+            are pure speculation.  As with the auth daemon, they should be
+            read from the configuration database.
+        """
+        if self.uid is not None and self.__started:
+            logger.warn(BIND10_START_AS_NON_ROOT_RESOLVER)
+        self.curproc = "b10-resolver"
+        # XXX: this must be read from the configuration manager in the future
+        resargs = ['b10-resolver']
+        if self.uid:
+            resargs += ['-u', str(self.uid)]
+        if self.verbose:
+            resargs += ['-v']
+
+        # ... and start
+        return self.start_process("b10-resolver", resargs, self.c_channel_env)
+
+    def start_cmdctl(self):
+        """
+            Starts the command control process
+        """
+        args = ["b10-cmdctl"]
+        if self.cmdctl_port is not None:
+            args.append("--port=" + str(self.cmdctl_port))
+        if self.verbose:
+            args.append("-v")
+        return self.start_process("b10-cmdctl", args, self.c_channel_env,
+                                  self.cmdctl_port)
+
    def start_all_components(self):
        """
            Starts up all the components.  Any exception generated during the
            starting of the components is handled by the caller.
        """
        # Start the real core (sockcreator, msgq, cfgmgr) first; everything
        # else depends on the command channel and configuration they provide.
        self._component_configurator.startup(self.__core_components)

        # Connect to the msgq.  This is not a process, so it's not handled
        # inside the configurator.
        self.start_ccsession(self.c_channel_env)

        # Extract the parameters associated with Bob.  This can only be
        # done after the CC Session is started.  Note that the logging
        # configuration may override the "-v" switch set on the command line.
        self._read_bind10_config()

        # TODO: Return the dropping of privileges
+
    def startup(self):
        """
            Start the BoB instance.

            Returns None if successful, otherwise a string describing the
            problem.
        """
        # Probe the command channel first: if we can connect, a b10-msgq is
        # already running (or a stale socket file is present) and starting
        # a second instance would be an error.
        c_channel_env = {}
        if self.msgq_socket_file is not None:
             c_channel_env["BIND10_MSGQ_SOCKET_FILE"] = self.msgq_socket_file
        logger.debug(DBG_PROCESS, BIND10_CHECK_MSGQ_ALREADY_RUNNING)
        try:
            self.cc_session = isc.cc.Session(self.msgq_socket_file)
            # Connecting succeeded, so something is already listening there.
            logger.fatal(BIND10_MSGQ_ALREADY_RUNNING)
            return "b10-msgq already running, or socket file not cleaned , cannot start"
        except isc.cc.session.SessionError:
            # This is the case we want: the msgq is not running yet.
            pass

        # Start all components.  If any one fails to start, kill all started
        # components and return an error indication.
        try:
            self.c_channel_env = c_channel_env
            self.start_all_components()
        except Exception as e:
            self.kill_started_components()
            return "Unable to start " + self.curproc + ": " + str(e)

        # Started successfully.
        self.runnable = True
        self.__started = True
        return None
+
+    def stop_process(self, process, recipient):
+        """
+        Stop the given process, friendly-like. The process is the name it has
+        (in logs, etc), the recipient is the address on msgq.
+        """
+        logger.info(BIND10_STOP_PROCESS, process)
+        self.cc_session.group_sendmsg({'command': ['shutdown']}, recipient,
+            recipient)
+
+    def component_shutdown(self, exitcode=0):
+        """
+        Stop the Boss instance from a components' request. The exitcode
+        indicates the desired exit code.
+
+        If we did not start yet, it raises an exception, which is meant
+        to propagate through the component and configurator to the startup
+        routine and abort the startup immediately. If it is started up already,
+        we just mark it so we terminate soon.
+
+        It does set the exit code in both cases.
+        """
+        self.exitcode = exitcode
+        if not self.__started:
+            raise Exception("Component failed during startup");
+        else:
+            self.runnable = False
+
    def shutdown(self):
        """Stop the BoB instance and every component it supervises.

        Escalates in three stages: a polite shutdown request through the
        configurator, then a SIGTERM to each remaining component, and
        finally repeated SIGKILLs until every child has been reaped.
        """
        logger.info(BIND10_SHUTDOWN)
        # first try using the BIND 10 request to stop
        try:
            self._component_configurator.shutdown()
        except:
            # Best effort: a failed polite request must not prevent the
            # forced termination below.
            pass
        # XXX: some delay probably useful... how much is uncertain
        # I have changed the delay from 0.5 to 1, but sometime it's
        # still not enough.
        time.sleep(1)
        self.reap_children()
        # next try sending a SIGTERM
        components_to_stop = list(self.components.values())
        for component in components_to_stop:
            logger.info(BIND10_SEND_SIGTERM, component.name(), component.pid())
            try:
                component.kill()
            except OSError:
                # ignore these (usually ESRCH because the child
                # finally exited)
                pass
        # finally, send SIGKILL (unmaskable termination) until everybody dies
        while self.components:
            # XXX: some delay probably useful... how much is uncertain
            time.sleep(0.1)
            self.reap_children()
            components_to_stop = list(self.components.values())
            for component in components_to_stop:
                logger.info(BIND10_SEND_SIGKILL, component.name(),
                            component.pid())
                try:
                    component.kill(True)
                except OSError:
                    # ignore these (usually ESRCH because the child
                    # finally exited)
                    pass
        logger.info(BIND10_SHUTDOWN_COMPLETE)
+
    def _get_process_exit_status(self):
        # Obtain the (pid, exit_status) of one exited child without blocking;
        # os.waitpid with WNOHANG returns (0, 0) when no child has exited.
        # NOTE(review): presumably split into its own method so tests can
        # substitute it — verify against the test suite.
        return os.waitpid(-1, os.WNOHANG)
+
    def reap_children(self):
        """Check to see if any of our child processes have exited,
        and note this for later handling.

        Loops until waitpid() reports no more exited children (pid 0) or
        no children remain at all (ECHILD).  Known components that died
        while we are running are told they failed; ones that decline an
        immediate restart are queued on components_to_restart.
        """
        while True:
            try:
                (pid, exit_status) = self._get_process_exit_status()
            except OSError as o:
                if o.errno == errno.ECHILD: break
                # XXX: should be impossible to get any other error here
                raise
            if pid == 0: break
            if pid in self.components:
                # One of the components we know about.  Get information on it.
                component = self.components.pop(pid)
                logger.info(BIND10_PROCESS_ENDED, component.name(), pid,
                            exit_status)
                if component.running() and self.runnable:
                    # Tell it it failed.  But only if it matters (we are
                    # not shutting down and the component considers itself
                    # to be running).
                    component_restarted = component.failed(exit_status);
                    # if the process wants to be restarted, but not just yet,
                    # it returns False
                    if not component_restarted:
                        self.components_to_restart.append(component)
            else:
                logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
+
+    def restart_processes(self):
+        """
+            Restart any dead processes:
+
+            * Returns the time when the next process is ready to be restarted. 
+            * If the server is shutting down, returns 0.
+            * If there are no processes, returns None.
+
+            The values returned can be safely passed into select() as the 
+            timeout value.
+
+        """
+        if not self.runnable:
+            return 0
+        still_dead = []
+        # keep track of the first time we need to check this queue again,
+        # if at all
+        next_restart_time = None
+        now = time.time()
+        for component in self.components_to_restart:
+            if not component.restart(now):
+                still_dead.append(component)
+                if next_restart_time is None or\
+                   next_restart_time > component.get_restart_time():
+                    next_restart_time = component.get_restart_time()
+        self.components_to_restart = still_dead
+
+        return next_restart_time
+
# Global variables needed by the signal handlers: handlers must have the
# fixed (signum, frame) signature, so state is shared at module level.
options = None
boss_of_bind = None
+
def reaper(signal_number, stack_frame):
    """A child process has died (SIGCHLD received).

    Intentionally does nothing: the Python signal machinery is set up
    (via signal.set_wakeup_fd in main()) to write a byte down a pipe,
    which wakes the select() in the main loop; the actual reaping is
    done there.
    """
    pass
+
def get_signame(signal_number):
    """Return the symbolic name (e.g. "SIGTERM") for a signal number.

    Falls back to "Unknown signal <n>" when no matching SIG* constant
    exists in the signal module.
    """
    # Only consider real signal names: "SIG" followed by an alphanumeric
    # character, which excludes entries such as SIG_DFL and SIG_IGN.
    candidates = (name for name in dir(signal)
                  if name.startswith("SIG") and name[3].isalnum()
                  and getattr(signal, name) == signal_number)
    return next(candidates, "Unknown signal %d" % signal_number)
+
+# XXX: perhaps register atexit() function and invoke that instead
def fatal_signal(signal_number, stack_frame):
    """Handle a fatal signal (SIGINT or SIGTERM): arrange a clean exit.

    Restores the default SIGCHLD handler and flags the boss as not
    runnable, so the main loop falls through to the shutdown sequence.
    """
    # Fix: the previous version also declared "global options" although
    # 'options' is never referenced in this function; the dead declaration
    # has been removed.  'global boss_of_bind' is kept for clarity even
    # though only an attribute is assigned.
    global boss_of_bind
    logger.info(BIND10_RECEIVED_SIGNAL, get_signame(signal_number))
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    boss_of_bind.runnable = False
+
def process_rename(option, opt_str, value, parser):
    """Function that renames the process if it is requested by a option.

    optparse callback for --pretty-name: 'value' is the new name; the
    option/opt_str/parser arguments are required by the callback
    signature but are unused here.
    """
    isc.util.process.rename(value)
+
def parse_args(args=sys.argv[1:], Parser=OptionParser):
    """
    Parse the command line arguments and return the resulting options
    object from OptionParser.  Exits (via the parser) on an invalid
    cmdctl port or when any positional arguments are supplied.
    """
    opt_parser = Parser(version=VERSION)
    opt_parser.add_option("-m", "--msgq-socket-file", dest="msgq_socket_file",
                          type="string", default=None,
                          help="UNIX domain socket file the b10-msgq daemon will use")
    opt_parser.add_option("-n", "--no-cache", action="store_true", dest="nocache",
                          default=False, help="disable hot-spot cache in authoritative DNS server")
    opt_parser.add_option("-u", "--user", dest="user", type="string", default=None,
                          help="Change user after startup (must run as root)")
    opt_parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                          help="display more about what is going on")
    opt_parser.add_option("--pretty-name", type="string", action="callback",
                          callback=process_rename,
                          help="Set the process name (displayed in ps, top, ...)")
    opt_parser.add_option("-c", "--config-file", action="store",
                          dest="config_file", default=None,
                          help="Configuration database filename")
    opt_parser.add_option("-p", "--data-path", dest="data_path",
                          help="Directory to search for configuration files",
                          default=None)
    opt_parser.add_option("--cmdctl-port", dest="cmdctl_port", type="int",
                          default=None, help="Port of command control")
    opt_parser.add_option("--pid-file", dest="pid_file", type="string",
                          default=None,
                          help="file to dump the PID of the BIND 10 process")
    opt_parser.add_option("-w", "--wait", dest="wait_time", type="int",
                          default=10, help="Time (in seconds) to wait for config manager to start up")

    (options, remaining) = opt_parser.parse_args(args)

    # Validate the cmdctl port range early, with a parser-style error.
    if options.cmdctl_port is not None:
        try:
            isc.net.parse.port_parse(options.cmdctl_port)
        except ValueError as e:
            opt_parser.error(e)

    # Positional arguments are not accepted at all.
    if remaining:
        opt_parser.print_help()
        sys.exit(1)

    return options
+
def dump_pid(pid_file):
    """
    Dump the PID of the current process to the specified file.  If the
    given file is None this function does nothing.  If the file already
    exists, the existing content will be replaced.  If a system error
    happens in creating or writing to the file, the corresponding
    exception will be propagated to the caller.
    """
    if pid_file is None:
        return
    # The with-statement guarantees the file is closed even when the
    # write itself raises (the previous open/write/close sequence could
    # leak the handle on a write error).
    with open(pid_file, "w") as f:
        f.write('%d\n' % os.getpid())
+
def unlink_pid_file(pid_file):
    """
    Remove the given file, which is basically expected to be the PID file
    created by dump_pid().  The specified file may or may not exist; if it
    doesn't this function does nothing.  Other system level errors in
    removing the file will be propagated as the corresponding exception.
    """
    if pid_file is None:
        return
    try:
        os.unlink(pid_file)
    except OSError as error:
        # Fix: compare errno values with != rather than 'is not'.
        # Identity comparison of integers depends on interpreter int
        # caching and is not a reliable equality test.
        if error.errno != errno.ENOENT:
            raise
+
+
def main():
    """Entry point: resolve the user, install signal handlers, start the
    boss, and run the select()-based main loop.

    The loop multiplexes between the signal wakeup pipe and the module CC
    session, reaping exited children and scheduling restarts, until the
    boss is flagged as no longer runnable; it then runs the shutdown
    sequence and exits.
    """
    global options
    global boss_of_bind
    # Enforce line buffering on stdout, even when not a TTY
    sys.stdout = io.TextIOWrapper(sys.stdout.detach(), line_buffering=True)

    options = parse_args()

    # Check user ID.
    setuid = None
    username = None
    if options.user:
        # Try getting information about the user, assuming UID passed.
        try:
            pw_ent = pwd.getpwuid(int(options.user))
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except ValueError:
            pass
        except KeyError:
            pass

        # Next try getting information about the user, assuming user name
        # passed.
        # If the information is both a valid user name and user number, we
        # prefer the name because we try it second. A minor point, hopefully.
        try:
            pw_ent = pwd.getpwnam(options.user)
            setuid = pw_ent.pw_uid
            username = pw_ent.pw_name
        except KeyError:
            pass

        if setuid is None:
            logger.fatal(BIND10_INVALID_USER, options.user)
            sys.exit(1)

    # Announce startup.
    logger.info(BIND10_STARTING, VERSION)

    # Create wakeup pipe for signal handlers
    wakeup_pipe = os.pipe()
    signal.set_wakeup_fd(wakeup_pipe[1])

    # Set signal handlers for catching child termination, as well
    # as our own demise.
    signal.signal(signal.SIGCHLD, reaper)
    signal.siginterrupt(signal.SIGCHLD, False)
    signal.signal(signal.SIGINT, fatal_signal)
    signal.signal(signal.SIGTERM, fatal_signal)

    # Block SIGPIPE, as we don't want it to end this process
    signal.signal(signal.SIGPIPE, signal.SIG_IGN)

    # Go bob!
    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
                       options.config_file, options.nocache, options.verbose,
                       setuid, username, options.cmdctl_port,
                       options.wait_time)
    startup_result = boss_of_bind.startup()
    if startup_result:
        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
        sys.exit(1)
    logger.info(BIND10_STARTUP_COMPLETE)
    dump_pid(options.pid_file)

    # In our main loop, we check for dead processes or messages
    # on the c-channel.
    wakeup_fd = wakeup_pipe[0]
    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
    while boss_of_bind.runnable:
        # clean up any processes that exited
        boss_of_bind.reap_children()
        next_restart = boss_of_bind.restart_processes()
        if next_restart is None:
            wait_time = None
        else:
            wait_time = max(next_restart - time.time(), 0)

        # select() can raise EINTR when a signal arrives,
        # even if they are resumable, so we have to catch
        # the exception
        try:
            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [],
                                                  wait_time)
        except select.error as err:
            if err.args[0] == errno.EINTR:
                (rlist, wlist, xlist) = ([], [], [])
            else:
                logger.fatal(BIND10_SELECT_ERROR, err)
                break

        for fd in rlist + xlist:
            if fd == ccs_fd:
                try:
                    boss_of_bind.ccs.check_command()
                except isc.cc.session.ProtocolError:
                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
                    # Bug fix: this is a module-level function, so there is
                    # no 'self'; the previous "self.runnable = False" would
                    # have raised NameError instead of stopping the loop.
                    boss_of_bind.runnable = False
                    break
            elif fd == wakeup_fd:
                os.read(wakeup_fd, 32)

    # shutdown
    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    boss_of_bind.shutdown()
    unlink_pid_file(options.pid_file)
    sys.exit(0)
+
# Run only when executed as a script, not when imported (e.g. by tests).
if __name__ == "__main__":
    main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 1184fd1..adc9798 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -4,16 +4,71 @@
     "module_description": "Master process",
     "config_data": [
       {
-        "item_name": "start_auth",
-        "item_type": "boolean",
+        "item_name": "components",
+        "item_type": "named_set",
         "item_optional": false,
-        "item_default": true
-      },
-      {
-        "item_name": "start_resolver",
-        "item_type": "boolean",
-        "item_optional": false,
-        "item_default": false
+        "item_default": {
+          "b10-auth": { "special": "auth", "kind": "needed", "priority": 10 },
+          "setuid": {
+            "special": "setuid",
+            "priority": 5,
+            "kind": "dispensable"
+          },
+          "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+          "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+          "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
+          "b10-stats": { "address": "Stats", "kind": "dispensable" },
+          "b10-stats-httpd": {
+            "address": "StatsHttpd",
+            "kind": "dispensable"
+          },
+          "b10-cmdctl": { "special": "cmdctl", "kind": "needed" }
+        },
+        "named_set_item_spec": {
+          "item_name": "component",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": { },
+          "map_item_spec": [
+            {
+              "item_name": "special",
+              "item_optional": true,
+              "item_type": "string"
+            },
+            {
+              "item_name": "process",
+              "item_optional": true,
+              "item_type": "string"
+            },
+            {
+              "item_name": "kind",
+              "item_optional": false,
+              "item_type": "string",
+              "item_default": "dispensable"
+            },
+            {
+              "item_name": "address",
+              "item_optional": true,
+              "item_type": "string"
+            },
+            {
+              "item_name": "params",
+              "item_optional": true,
+              "item_type": "list",
+              "list_item_spec": {
+                "item_name": "param",
+                "item_optional": false,
+                "item_type": "string",
+                "item_default": ""
+              }
+            },
+            {
+              "item_name": "priority",
+              "item_optional": true,
+              "item_type": "integer"
+            }
+          ]
+        }
       }
     ],
     "commands": [
@@ -37,6 +92,17 @@
         "command_description": "List the running BIND 10 processes",
         "command_args": []
       }
+    ],
+    "statistics": [
+      {
+        "item_name": "boot_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Boot time",
+        "item_description": "A date time when bind10 process starts initially",
+        "item_format": "date-time"
+      }
     ]
   }
 }
diff --git a/src/bin/bind10/creatorapi.txt b/src/bin/bind10/creatorapi.txt
new file mode 100644
index 0000000..c23d907
--- /dev/null
+++ b/src/bin/bind10/creatorapi.txt
@@ -0,0 +1,123 @@
+Socket creator API
+==================
+
+This API is between Boss and other modules to allow them requesting of sockets.
+For simplicity, we will use the socket creator for all (even non-privileged)
+ports for now, but we should have some function where we can abstract it later.
+
+Goals
+-----
+* Be able to request a socket of any combination of IPv4/IPv6 and UDP/TCP, bound
+  to a given port and address (sockets that are not bound to anything can be created
+  without privileges, therefore are not requested from the socket creator).
+* Allow to provide the same socket to multiple modules (eg. multiple running
+  auth servers).
+* Allow releasing the sockets (in case all modules using it give it up,
+  terminate or crash).
+* Allow restricting of the sharing (don't allow shared socket between auth
+  and recursive, as the packets would often get to the wrong application,
+  show error instead).
+* Get the socket to the application.
+
+Transport of sockets
+--------------------
+It seems we are stuck with current msgq for a while and there's a chance the
+new replacement will not be able to send sockets inbound. So, we need another
+channel.
+
+The boss will create a unix-domain socket and listen on it. When something
+requests a socket over the command channel and the socket is created, some kind
+of token is returned to the application (which will represent the future
+socket). The application then connects to the unix-domain socket, sends the
+token over the connection (so Boss will know which socket to send there, in case
+multiple applications ask for sockets simultaneously) and Boss sends the socket
+in return.
+
+In theory, we could send the requests directly over the unix-domain
+socket, but it has two disadvantages:
+* The msgq handles serializing/deserializing of structured
+  information (like the parameters to be used), we would have to do it
+  manually on the socket.
+* We could place some kind of security in front of msgq (in case file
+  permissions are not enough, for example if they are not honored on
+  socket files, as indicated in the first paragraph of:
+  http://lkml.indiana.edu/hypermail/linux/kernel/0505.2/0008.html).
+  The socket would have to be secured separately. With the tokens,
+  there's some level of security already - someone not having the
+  token can't request a privileged socket.
+
+Caching of sockets
+------------------
+To allow sending the same socket to multiple applications, the Boss process will
+hold a cache. Each socket that is created and sent is kept open in Boss and
+preserved there as well. A reference count is kept with each of them.
+
+When another application asks for the same socket, it is simply sent from the
+cache instead of creating it again by the creator.
+
+When application gives the socket willingly (by sending a message over the
+command channel), the reference count can be decreased without problems. But
+when the application terminates or crashes, we need to decrease it as well.
+There's a problem, since we don't know which command channel connection (eg.
+lname) belongs to which PID. Furthermore, the applications don't need to be
+started by boss.
+
+There are two possibilities:
+* Let the msgq send messages about disconnected clients (eg. group message to
+  some name). This one is better if we want to migrate to dbus, since dbus
+  already has this capability as well as sending the sockets inbound (at least it
+  seems so on unix) and we could get rid of the unix-domain socket completely.
+* Keep the unix-domain connections open forever. Boss can remember which socket
+  was sent to which connection and when the connection closes (because the
+  application crashed), it can drop all the references on the sockets. This
+  seems easier to implement.
+
+The commands
+------------
+* Command to release a socket. This one would have single parameter, the token
+  used to get the socket. After this, boss would decrease its reference count
+  and if it drops to zero, close its own copy of the socket. This should be used
+  when the module stops using the socket (and after closes it). The
+  library could remember the file-descriptor to token mapping (for
+  common applications that don't request the same socket multiple
+  times in parallel).
+* Command to request a socket. It would have parameters to specify which socket
+  (IP address, address family, port) and how to allow sharing. Sharing would be
+  one of:
+  - None
+  - Same kind of application (however, it is not entirely clear what
+    this means, in case it won't work out intuitively, we'll need to
+    define it somehow)
+  - Any kind of application
+  And a kind of application would be provided, to decide if the sharing is
+  possible (eg. if auth allows sharing with the same kind and something else
+  allows sharing with anything, the sharing is not possible, two auths can).
+
+  It would return either error (the socket can't be created or sharing is not
+  possible) or the token. Then there would be some time for the application to
+  pick up the requested socket.
+
+Examples
+--------
+We probably would have a library with blocking calls to request the
+sockets, so a code could look like:
+
+(socket_fd, token) = request_socket(address, port, 'UDP', SHARE_SAMENAME, 'test-application')
+sock = socket.fromfd(socket_fd)
+
+# Some sock.send and sock.recv stuff here
+
+sock.close()
+release_socket(socket_fd) # or release_socket(token)
+
+Known limitations
+-----------------
+Currently the socket creator doesn't support specifying any socket
+options. If it turns out there are any options that need to be set
+before bind(), we'll need to extend it (and extend the protocol as
+well). If we want to support them, we'll have to solve a possible
+conflict (what to do when two applications request the same socket and
+want to share it, but want different options).
+
+The current socket creator doesn't know raw sockets, but if they are
+needed, it should be easy to add.
diff --git a/src/bin/bind10/run_bind10.sh.in b/src/bin/bind10/run_bind10.sh.in
index 4020593..9e4abc0 100755
--- a/src/bin/bind10/run_bind10.sh.in
+++ b/src/bin/bind10/run_bind10.sh.in
@@ -20,17 +20,17 @@ export PYTHON_EXEC
 
 BIND10_PATH=@abs_top_builddir@/src/bin/bind10
 
-PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:$PATH
+PATH=@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
 export PATH
 
-PYTHONPATH=@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
 export PYTHONPATH
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 
@@ -45,6 +45,5 @@ export B10_FROM_BUILD
 BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
 export BIND10_MSGQ_SOCKET_FILE
 
-cd ${BIND10_PATH}
-exec ${PYTHON_EXEC} -O bind10 "$@"
+exec ${PYTHON_EXEC} -O ${BIND10_PATH}/bind10 "$@"
 
diff --git a/src/bin/bind10/tests/Makefile.am b/src/bin/bind10/tests/Makefile.am
index 3d8d57a..d54ee56 100644
--- a/src/bin/bind10/tests/Makefile.am
+++ b/src/bin/bind10/tests/Makefile.am
@@ -2,13 +2,13 @@ PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 #PYTESTS = args_test.py bind10_test.py
 # NOTE: this has a generated test found in the builddir
 PYTESTS = bind10_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -20,8 +20,9 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	chmod +x $(abs_builddir)/$$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
 	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
 		$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 9d794a6..c917d33 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,7 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
 
 # XXX: environment tests are currently disabled, due to the preprocessor
 #      setup that we have now complicating the environment
@@ -21,6 +21,7 @@ from bind10 import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BAS
 import unittest
 import sys
 import os
+import copy
 import signal
 import socket
 from isc.net.addr import IPAddr
@@ -103,17 +104,11 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.msgq_socket_file, None)
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
+        self.assertEqual(bob.components, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
 
     def test_init_alternate_socket(self):
         bob = BoB("alt_socket_file")
@@ -121,25 +116,38 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.msgq_socket_file, "alt_socket_file")
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
-        self.assertEqual(bob.processes, {})
-        self.assertEqual(bob.dead_processes, {})
+        self.assertEqual(bob.components, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
 
     def test_command_handler(self):
         class DummySession():
             def group_sendmsg(self, msg, group):
                 (self.msg, self.group) = (msg, group)
             def group_recvmsg(self, nonblock, seq): pass
+        class DummyModuleCCSession():
+            module_spec = isc.config.module_spec.ModuleSpec({
+                    "module_name": "Boss",
+                    "statistics": [
+                        {
+                            "item_name": "boot_time",
+                            "item_type": "string",
+                            "item_optional": False,
+                            "item_default": "1970-01-01T00:00:00Z",
+                            "item_title": "Boot time",
+                            "item_description": "A date time when bind10 process starts initially",
+                            "item_format": "date-time"
+                            }
+                        ]
+                    })
+            def get_module_spec(self):
+                return self.module_spec
         bob = BoB()
         bob.verbose = True
         bob.cc_session = DummySession()
+        bob.ccs = DummyModuleCCSession()
         # a bad command
         self.assertEqual(bob.command_handler(-1, None),
                          isc.config.ccsession.create_answer(1, "bad command"))
@@ -147,14 +155,22 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.command_handler("shutdown", None),
                          isc.config.ccsession.create_answer(0))
         self.assertFalse(bob.runnable)
+        # "getstats" command
+        self.assertEqual(bob.command_handler("getstats", None),
+                         isc.config.ccsession.create_answer(0,
+                            { "owner": "Boss",
+                              "data": {
+                                'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+                            }}))
         # "sendstats" command
         self.assertEqual(bob.command_handler("sendstats", None),
                          isc.config.ccsession.create_answer(0))
         self.assertEqual(bob.cc_session.group, "Stats")
         self.assertEqual(bob.cc_session.msg,
                          isc.config.ccsession.create_command(
-                'set', { "stats_data": {
-                        'bind10.boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', _BASETIME)
+                "set", { "owner": "Boss",
+                         "data": {
+                        "boot_time": time.strftime("%Y-%m-%dT%H:%M:%SZ", _BASETIME)
                         }}))
         # "ping" command
         self.assertEqual(bob.command_handler("ping", None),
@@ -191,142 +207,192 @@ class MockBob(BoB):
         self.stats = False
         self.stats_httpd = False
         self.cmdctl = False
+        self.dhcp6 = False
+        self.dhcp4 = False
         self.c_channel_env = {}
-        self.processes = { }
-
-    def read_bind10_config(self):
+        self.components = { }
+        self.creator = False
+
+        class MockSockCreator(isc.bind10.component.Component):
+            def __init__(self, process, boss, kind, address=None, params=None):
+                isc.bind10.component.Component.__init__(self, process, boss,
+                                                        kind, 'SockCreator')
+                self._start_func = boss.start_creator
+
+        specials = isc.bind10.special_component.get_specials()
+        specials['sockcreator'] = MockSockCreator
+        self._component_configurator = \
+            isc.bind10.component.Configurator(self, specials)
+
+    def start_creator(self):
+        self.creator = True
+        procinfo = ProcessInfo('b10-sockcreator', ['/bin/false'])
+        procinfo.pid = 1
+        return procinfo
+
+    def _read_bind10_config(self):
         # Configuration options are set directly
         pass
 
-    def start_msgq(self, c_channel_env):
+    def start_msgq(self):
         self.msgq = True
-        self.processes[2] = ProcessInfo('b10-msgq', ['/bin/false'])
-        self.processes[2].pid = 2
-
-    def start_cfgmgr(self, c_channel_env):
-        self.cfgmgr = True
-        self.processes[3] = ProcessInfo('b10-cfgmgr', ['/bin/false'])
-        self.processes[3].pid = 3
+        procinfo = ProcessInfo('b10-msgq', ['/bin/false'])
+        procinfo.pid = 2
+        return procinfo
 
     def start_ccsession(self, c_channel_env):
+        # this is not a process, don't have to do anything with procinfo
         self.ccsession = True
-        self.processes[4] = ProcessInfo('b10-ccsession', ['/bin/false'])
-        self.processes[4].pid = 4
 
-    def start_auth(self, c_channel_env):
+    def start_cfgmgr(self):
+        self.cfgmgr = True
+        procinfo = ProcessInfo('b10-cfgmgr', ['/bin/false'])
+        procinfo.pid = 3
+        return procinfo
+
+    def start_auth(self):
         self.auth = True
-        self.processes[5] = ProcessInfo('b10-auth', ['/bin/false'])
-        self.processes[5].pid = 5
+        procinfo = ProcessInfo('b10-auth', ['/bin/false'])
+        procinfo.pid = 5
+        return procinfo
 
-    def start_resolver(self, c_channel_env):
+    def start_resolver(self):
         self.resolver = True
-        self.processes[6] = ProcessInfo('b10-resolver', ['/bin/false'])
-        self.processes[6].pid = 6
-
-    def start_xfrout(self, c_channel_env):
+        procinfo = ProcessInfo('b10-resolver', ['/bin/false'])
+        procinfo.pid = 6
+        return procinfo
+
+    def start_simple(self, name):
+        procmap = { 'b10-zonemgr': self.start_zonemgr,
+                    'b10-stats': self.start_stats,
+                    'b10-stats-httpd': self.start_stats_httpd,
+                    'b10-cmdctl': self.start_cmdctl,
+                    'b10-dhcp6': self.start_dhcp6,
+                    'b10-dhcp4': self.start_dhcp4,
+                    'b10-xfrin': self.start_xfrin,
+                    'b10-xfrout': self.start_xfrout }
+        return procmap[name]()
+
+    def start_xfrout(self):
         self.xfrout = True
-        self.processes[7] = ProcessInfo('b10-xfrout', ['/bin/false'])
-        self.processes[7].pid = 7
+        procinfo = ProcessInfo('b10-xfrout', ['/bin/false'])
+        procinfo.pid = 7
+        return procinfo
 
-    def start_xfrin(self, c_channel_env):
+    def start_xfrin(self):
         self.xfrin = True
-        self.processes[8] = ProcessInfo('b10-xfrin', ['/bin/false'])
-        self.processes[8].pid = 8
+        procinfo = ProcessInfo('b10-xfrin', ['/bin/false'])
+        procinfo.pid = 8
+        return procinfo
 
-    def start_zonemgr(self, c_channel_env):
+    def start_zonemgr(self):
         self.zonemgr = True
-        self.processes[9] = ProcessInfo('b10-zonemgr', ['/bin/false'])
-        self.processes[9].pid = 9
+        procinfo = ProcessInfo('b10-zonemgr', ['/bin/false'])
+        procinfo.pid = 9
+        return procinfo
 
-    def start_stats(self, c_channel_env):
+    def start_stats(self):
         self.stats = True
-        self.processes[10] = ProcessInfo('b10-stats', ['/bin/false'])
-        self.processes[10].pid = 10
+        procinfo = ProcessInfo('b10-stats', ['/bin/false'])
+        procinfo.pid = 10
+        return procinfo
 
-    def start_stats_httpd(self, c_channel_env):
+    def start_stats_httpd(self):
         self.stats_httpd = True
-        self.processes[11] = ProcessInfo('b10-stats-httpd', ['/bin/false'])
-        self.processes[11].pid = 11
+        procinfo = ProcessInfo('b10-stats-httpd', ['/bin/false'])
+        procinfo.pid = 11
+        return procinfo
 
-    def start_cmdctl(self, c_channel_env):
+    def start_cmdctl(self):
         self.cmdctl = True
-        self.processes[12] = ProcessInfo('b10-cmdctl', ['/bin/false'])
-        self.processes[12].pid = 12
+        procinfo = ProcessInfo('b10-cmdctl', ['/bin/false'])
+        procinfo.pid = 12
+        return procinfo
 
-    def start_dhcp6(self, c_channel_env):
+    def start_dhcp6(self):
         self.dhcp6 = True
-        self.processes[13] = ProcessInfo('b10-dhcp6', ['/bin/false'])
-        self.processes[13]
+        procinfo = ProcessInfo('b10-dhcp6', ['/bin/false'])
+        procinfo.pid = 13
+        return procinfo
 
-    def start_dhcp4(self, c_channel_env):
+    def start_dhcp4(self):
         self.dhcp4 = True
-        self.processes[14] = ProcessInfo('b10-dhcp4', ['/bin/false'])
-        self.processes[14]
-
-    # We don't really use all of these stop_ methods. But it might turn out
-    # someone would add some stop_ method to BoB and we want that one overriden
-    # in case he forgets to update the tests.
+        procinfo = ProcessInfo('b10-dhcp4', ['/bin/false'])
+        procinfo.pid = 14
+        return procinfo
+
+    def stop_process(self, process, recipient):
+        procmap = { 'b10-auth': self.stop_auth,
+                    'b10-resolver': self.stop_resolver,
+                    'b10-xfrout': self.stop_xfrout,
+                    'b10-xfrin': self.stop_xfrin,
+                    'b10-zonemgr': self.stop_zonemgr,
+                    'b10-stats': self.stop_stats,
+                    'b10-stats-httpd': self.stop_stats_httpd,
+                    'b10-cmdctl': self.stop_cmdctl }
+        procmap[process]()
+
+    # Some functions to pretend we stop processes, use by stop_process
     def stop_msgq(self):
         if self.msgq:
-            del self.processes[2]
+            del self.components[2]
         self.msgq = False
 
     def stop_cfgmgr(self):
         if self.cfgmgr:
-            del self.processes[3]
+            del self.components[3]
         self.cfgmgr = False
 
-    def stop_ccsession(self):
-        if self.ccssession:
-            del self.processes[4]
-        self.ccsession = False
-
     def stop_auth(self):
         if self.auth:
-            del self.processes[5]
+            del self.components[5]
         self.auth = False
 
     def stop_resolver(self):
         if self.resolver:
-            del self.processes[6]
+            del self.components[6]
         self.resolver = False
 
     def stop_xfrout(self):
         if self.xfrout:
-            del self.processes[7]
+            del self.components[7]
         self.xfrout = False
 
     def stop_xfrin(self):
         if self.xfrin:
-            del self.processes[8]
+            del self.components[8]
         self.xfrin = False
 
     def stop_zonemgr(self):
         if self.zonemgr:
-            del self.processes[9]
+            del self.components[9]
         self.zonemgr = False
 
     def stop_stats(self):
         if self.stats:
-            del self.processes[10]
+            del self.components[10]
         self.stats = False
 
     def stop_stats_httpd(self):
         if self.stats_httpd:
-            del self.processes[11]
+            del self.components[11]
         self.stats_httpd = False
 
     def stop_cmdctl(self):
         if self.cmdctl:
-            del self.processes[12]
+            del self.components[12]
         self.cmdctl = False
 
 class TestStartStopProcessesBob(unittest.TestCase):
     """
-    Check that the start_all_processes method starts the right combination
-    of processes and that the right processes are started and stopped
+    Check that the start_all_components method starts the right combination
+    of components and that the right components are started and stopped
     according to changes in configuration.
     """
+    def check_environment_unchanged(self):
+        # Check whether the environment has not been changed
+        self.assertEqual(original_os_environ, os.environ)
+
     def check_started(self, bob, core, auth, resolver):
         """
         Check that the right sets of services are started. The ones that
@@ -337,6 +403,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
         self.assertEqual(bob.msgq, core)
         self.assertEqual(bob.cfgmgr, core)
         self.assertEqual(bob.ccsession, core)
+        self.assertEqual(bob.creator, core)
         self.assertEqual(bob.auth, auth)
         self.assertEqual(bob.resolver, resolver)
         self.assertEqual(bob.xfrout, auth)
@@ -345,6 +412,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
         self.assertEqual(bob.stats, core)
         self.assertEqual(bob.stats_httpd, core)
         self.assertEqual(bob.cmdctl, core)
+        self.check_environment_unchanged()
 
     def check_preconditions(self, bob):
         self.check_started(bob, False, False, False)
@@ -352,9 +420,10 @@ class TestStartStopProcessesBob(unittest.TestCase):
     def check_started_none(self, bob):
         """
         Check that the situation is according to configuration where no servers
-        should be started. Some processes still need to be running.
+        should be started. Some components still need to be running.
         """
         self.check_started(bob, True, False, False)
+        self.check_environment_unchanged()
 
     def check_started_both(self, bob):
         """
@@ -362,96 +431,86 @@ class TestStartStopProcessesBob(unittest.TestCase):
         (auth and resolver) are enabled.
         """
         self.check_started(bob, True, True, True)
+        self.check_environment_unchanged()
 
     def check_started_auth(self, bob):
         """
-        Check the set of processes needed to run auth only is started.
+        Check the set of components needed to run auth only is started.
         """
         self.check_started(bob, True, True, False)
+        self.check_environment_unchanged()
 
     def check_started_resolver(self, bob):
         """
-        Check the set of processes needed to run resolver only is started.
+        Check the set of components needed to run resolver only is started.
         """
         self.check_started(bob, True, False, True)
+        self.check_environment_unchanged()
 
     def check_started_dhcp(self, bob, v4, v6):
         """
         Check if proper combinations of DHCPv4 and DHCPv6 can be started
         """
-        v4found = 0
-        v6found = 0
-
-        for pid in bob.processes:
-            if (bob.processes[pid].name == "b10-dhcp4"):
-                v4found += 1
-            if (bob.processes[pid].name == "b10-dhcp6"):
-                v6found += 1
-
-        # there should be exactly one DHCPv4 daemon (if v4==True)
-        # there should be exactly one DHCPv6 daemon (if v6==True)
-        self.assertEqual(v4==True, v4found==1)
-        self.assertEqual(v6==True, v6found==1)
-
-    # Checks the processes started when starting neither auth nor resolver
-    # is specified.
-    def test_start_none(self):
-        # Create BoB and ensure correct initialization
-        bob = MockBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = False
-
-        bob.start_all_processes()
-        self.check_started_none(bob)
-
-    # Checks the processes started when starting only the auth process
-    def test_start_auth(self):
-        # Create BoB and ensure correct initialization
+        self.assertEqual(v4, bob.dhcp4)
+        self.assertEqual(v6, bob.dhcp6)
+        self.check_environment_unchanged()
+
+    def construct_config(self, start_auth, start_resolver):
+        # The things that are common, not turned on an off
+        config = {}
+        config['b10-stats'] = { 'kind': 'dispensable', 'address': 'Stats' }
+        config['b10-stats-httpd'] = { 'kind': 'dispensable',
+                                      'address': 'StatsHttpd' }
+        config['b10-cmdctl'] = { 'kind': 'needed', 'special': 'cmdctl' }
+        if start_auth:
+            config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
+            config['b10-xfrout'] = { 'kind': 'dispensable',
+                                     'address': 'Xfrout' }
+            config['b10-xfrin'] = { 'kind': 'dispensable',
+                                    'address': 'Xfrin' }
+            config['b10-zonemgr'] = { 'kind': 'dispensable',
+                                      'address': 'Zonemgr' }
+        if start_resolver:
+            config['b10-resolver'] = { 'kind': 'needed',
+                                       'special': 'resolver' }
+        return {'components': config}
+
+    def config_start_init(self, start_auth, start_resolver):
+        """
+        Test the configuration is loaded at the startup.
+        """
         bob = MockBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        bob.cfg_start_auth = True
-        bob.cfg_start_resolver = False
+        config = self.construct_config(start_auth, start_resolver)
+        class CC:
+            def get_full_config(self):
+                return config
+        # Provide the fake CC with data
+        bob.ccs = CC()
+        # And make sure it's not overwritten
+        def start_ccsession():
+            bob.ccsession = True
+        bob.start_ccsession = lambda _: start_ccsession()
+        # We need to return the original _read_bind10_config
+        bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+        bob.start_all_components()
+        self.check_started(bob, True, start_auth, start_resolver)
+        self.check_environment_unchanged()
 
-        bob.start_all_processes()
-
-        self.check_started_auth(bob)
+    def test_start_none(self):
+        self.config_start_init(False, False)
 
-    # Checks the processes started when starting only the resolver process
     def test_start_resolver(self):
-        # Create BoB and ensure correct initialization
-        bob = MockBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = True
-
-        bob.start_all_processes()
+        self.config_start_init(False, True)
 
-        self.check_started_resolver(bob)
+    def test_start_auth(self):
+        self.config_start_init(True, False)
 
-    # Checks the processes started when starting both auth and resolver process
     def test_start_both(self):
-        # Create BoB and ensure correct initialization
-        bob = MockBob()
-        self.check_preconditions(bob)
-
-        # Start processes and check what was started
-        bob.cfg_start_auth = True
-        bob.cfg_start_resolver = True
-
-        bob.start_all_processes()
-
-        self.check_started_both(bob)
+        self.config_start_init(True, True)
 
     def test_config_start(self):
         """
-        Test that the configuration starts and stops processes according
+        Test that the configuration starts and stops components according
         to configuration changes.
         """
 
@@ -459,17 +518,13 @@ class TestStartStopProcessesBob(unittest.TestCase):
         bob = MockBob()
         self.check_preconditions(bob)
 
-        # Start processes (nothing much should be started, as in
-        # test_start_none)
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = False
-
-        bob.start_all_processes()
+        bob.start_all_components()
         bob.runnable = True
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_none(bob)
 
         # Enable both at once
-        bob.config_handler({'start_auth': True, 'start_resolver': True})
+        bob.config_handler(self.construct_config(True, True))
         self.check_started_both(bob)
 
         # Not touched by empty change
@@ -477,11 +532,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
         self.check_started_both(bob)
 
         # Not touched by change to the same configuration
-        bob.config_handler({'start_auth': True, 'start_resolver': True})
+        bob.config_handler(self.construct_config(True, True))
         self.check_started_both(bob)
 
         # Turn them both off again
-        bob.config_handler({'start_auth': False, 'start_resolver': False})
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_none(bob)
 
         # Not touched by empty change
@@ -489,47 +544,45 @@ class TestStartStopProcessesBob(unittest.TestCase):
         self.check_started_none(bob)
 
         # Not touched by change to the same configuration
-        bob.config_handler({'start_auth': False, 'start_resolver': False})
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_none(bob)
 
         # Start and stop auth separately
-        bob.config_handler({'start_auth': True})
+        bob.config_handler(self.construct_config(True, False))
         self.check_started_auth(bob)
 
-        bob.config_handler({'start_auth': False})
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_none(bob)
 
         # Start and stop resolver separately
-        bob.config_handler({'start_resolver': True})
+        bob.config_handler(self.construct_config(False, True))
         self.check_started_resolver(bob)
 
-        bob.config_handler({'start_resolver': False})
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_none(bob)
 
         # Alternate
-        bob.config_handler({'start_auth': True})
+        bob.config_handler(self.construct_config(True, False))
         self.check_started_auth(bob)
 
-        bob.config_handler({'start_auth': False, 'start_resolver': True})
+        bob.config_handler(self.construct_config(False, True))
         self.check_started_resolver(bob)
 
-        bob.config_handler({'start_auth': True, 'start_resolver': False})
+        bob.config_handler(self.construct_config(True, False))
         self.check_started_auth(bob)
 
     def test_config_start_once(self):
         """
-        Tests that a process is started only once.
+        Tests that a component is started only once.
         """
         # Create BoB and ensure correct initialization
         bob = MockBob()
         self.check_preconditions(bob)
 
-        # Start processes (both)
-        bob.cfg_start_auth = True
-        bob.cfg_start_resolver = True
+        bob.start_all_components()
 
-        bob.start_all_processes()
         bob.runnable = True
+        bob.config_handler(self.construct_config(True, True))
         self.check_started_both(bob)
 
         bob.start_auth = lambda: self.fail("Started auth again")
@@ -539,12 +592,11 @@ class TestStartStopProcessesBob(unittest.TestCase):
         bob.start_resolver = lambda: self.fail("Started resolver again")
 
         # Send again we want to start them. Should not do it, as they are.
-        bob.config_handler({'start_auth': True})
-        bob.config_handler({'start_resolver': True})
+        bob.config_handler(self.construct_config(True, True))
 
     def test_config_not_started_early(self):
         """
-        Test that processes are not started by the config handler before
+        Test that components are not started by the config handler before
         startup.
         """
         bob = MockBob()
@@ -558,27 +610,29 @@ class TestStartStopProcessesBob(unittest.TestCase):
 
         bob.config_handler({'start_auth': True, 'start_resolver': True})
 
-    # Checks that DHCP (v4 and v6) processes are started when expected
+    # Checks that DHCP (v4 and v6) components are started when expected
     def test_start_dhcp(self):
 
         # Create BoB and ensure correct initialization
         bob = MockBob()
         self.check_preconditions(bob)
 
-        # don't care about DNS stuff
-        bob.cfg_start_auth = False
-        bob.cfg_start_resolver = False
-
-        # v4 and v6 disabled
-        bob.cfg_start_dhcp6 = False
-        bob.cfg_start_dhcp4 = False
-        bob.start_all_processes()
+        bob.start_all_components()
+        bob.config_handler(self.construct_config(False, False))
         self.check_started_dhcp(bob, False, False)
 
+    def test_start_dhcp_v6only(self):
+        # Create BoB and ensure correct initialization
+        bob = MockBob()
+        self.check_preconditions(bob)
         # v6 only enabled
-        bob.cfg_start_dhcp6 = True
-        bob.cfg_start_dhcp4 = False
-        bob.start_all_processes()
+        bob.start_all_components()
+        bob.runnable = True
+        bob._BoB_started = True
+        config = self.construct_config(False, False)
+        config['components']['b10-dhcp6'] = { 'kind': 'needed',
+                                              'address': 'Dhcp6' }
+        bob.config_handler(config)
         self.check_started_dhcp(bob, False, True)
 
         # uncomment when dhcpv4 becomes implemented
@@ -592,6 +646,12 @@ class TestStartStopProcessesBob(unittest.TestCase):
         #bob.cfg_start_dhcp4 = True
         #self.check_started_dhcp(bob, True, True)
 
+class MockComponent:
+    def __init__(self, name, pid):
+        self.name = lambda: name
+        self.pid = lambda: pid
+
+
 class TestBossCmd(unittest.TestCase):
     def test_ping(self):
         """
@@ -601,7 +661,7 @@ class TestBossCmd(unittest.TestCase):
         answer = bob.command_handler("ping", None)
         self.assertEqual(answer, {'result': [0, 'pong']})
 
-    def test_show_processes(self):
+    def test_show_processes_empty(self):
         """
         Confirm getting a list of processes works.
         """
@@ -609,23 +669,16 @@ class TestBossCmd(unittest.TestCase):
         answer = bob.command_handler("show_processes", None)
         self.assertEqual(answer, {'result': [0, []]})
 
-    def test_show_processes_started(self):
+    def test_show_processes(self):
         """
         Confirm getting a list of processes works.
         """
         bob = MockBob()
-        bob.start_all_processes()
+        bob.register_process(1, MockComponent('first', 1))
+        bob.register_process(2, MockComponent('second', 2))
         answer = bob.command_handler("show_processes", None)
-        processes = [[2, 'b10-msgq'],
-                     [3, 'b10-cfgmgr'], 
-                     [4, 'b10-ccsession'],
-                     [5, 'b10-auth'],
-                     [7, 'b10-xfrout'],
-                     [8, 'b10-xfrin'], 
-                     [9, 'b10-zonemgr'],
-                     [10, 'b10-stats'], 
-                     [11, 'b10-stats-httpd'], 
-                     [12, 'b10-cmdctl']]
+        processes = [[1, 'first'],
+                     [2, 'second']]
         self.assertEqual(answer, {'result': [0, processes]})
 
 class TestParseArgs(unittest.TestCase):
@@ -679,15 +732,6 @@ class TestParseArgs(unittest.TestCase):
         options = parse_args(['--cmdctl-port=1234'], TestOptParser)
         self.assertEqual(1234, options.cmdctl_port)
 
-    def test_brittle(self):
-        """
-        Test we can use the "brittle" flag.
-        """
-        options = parse_args([], TestOptParser)
-        self.assertFalse(options.brittle)
-        options = parse_args(['--brittle'], TestOptParser)
-        self.assertTrue(options.brittle)
-
 class TestPIDFile(unittest.TestCase):
     def setUp(self):
         self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
@@ -735,35 +779,160 @@ class TestPIDFile(unittest.TestCase):
         self.assertRaises(IOError, dump_pid,
                           'nonexistent_dir' + os.sep + 'bind10.pid')
 
-class TestBrittle(unittest.TestCase):
-    def test_brittle_disabled(self):
-        bob = MockBob()
-        bob.start_all_processes()
-        bob.runnable = True
+class TestBossComponents(unittest.TestCase):
+    """
+    Test the boss propagates component configuration properly to the
+    component configurator and acts sane.
+    """
+    def setUp(self):
+        self.__param = None
+        self.__called = False
+        self.__compconfig = {
+            'comp': {
+                'kind': 'needed',
+                'process': 'cat'
+            }
+        }
 
-        bob.reap_children()
-        self.assertTrue(bob.runnable)
+    def __unary_hook(self, param):
+        """
+        A hook function that stores the parameter for later examination.
+        """
+        self.__param = param
 
-    def simulated_exit(self):
-        ret_val = self.exit_info
-        self.exit_info = (0, 0)
-        return ret_val
+    def __nullary_hook(self):
+        """
+        A hook function that notes down it was called.
+        """
+        self.__called = True
 
-    def test_brittle_enabled(self):
+    def __check_core(self, config):
+        """
+        A function checking that the config contains parts for the valid
+        core component configuration.
+        """
+        self.assertIsNotNone(config)
+        for component in ['sockcreator', 'msgq', 'cfgmgr']:
+            self.assertTrue(component in config)
+            self.assertEqual(component, config[component]['special'])
+            self.assertEqual('core', config[component]['kind'])
+
+    def __check_extended(self, config):
+        """
+        This checks that the config contains the core and one more component.
+        """
+        self.__check_core(config)
+        self.assertTrue('comp' in config)
+        self.assertEqual('cat', config['comp']['process'])
+        self.assertEqual('needed', config['comp']['kind'])
+        self.assertEqual(4, len(config))
+
+    def test_correct_run(self):
+        """
+        Test the situation when we run in usual scenario, nothing fails,
+        we just start, reconfigure and then stop peacefully.
+        """
         bob = MockBob()
-        bob.start_all_processes()
+        # Start it
+        orig = bob._component_configurator.startup
+        bob._component_configurator.startup = self.__unary_hook
+        bob.start_all_components()
+        bob._component_configurator.startup = orig
+        self.__check_core(self.__param)
+        self.assertEqual(3, len(self.__param))
+
+        # Reconfigure it
+        self.__param = None
+        orig = bob._component_configurator.reconfigure
+        bob._component_configurator.reconfigure = self.__unary_hook
+        # Otherwise it does not work
         bob.runnable = True
+        bob.config_handler({'components': self.__compconfig})
+        self.__check_extended(self.__param)
+        currconfig = self.__param
+        # If we reconfigure it, but it does not contain the components part,
+        # nothing is called
+        bob.config_handler({})
+        self.assertEqual(self.__param, currconfig)
+        self.__param = None
+        bob._component_configurator.reconfigure = orig
+        # Check a configuration that messes up the core components is rejected.
+        compconf = dict(self.__compconfig)
+        compconf['msgq'] = { 'process': 'echo' }
+        result = bob.config_handler({'components': compconf})
+        # Check it rejected it
+        self.assertEqual(1, result['result'][0])
 
-        bob.brittle = True
-        self.exit_info = (5, 0)
-        bob._get_process_exit_status = self.simulated_exit
+        # We can't call shutdown, that one relies on the stuff in main
+        # We check somewhere else that the shutdown is actually called
+        # from there (the test_kills).
 
-        old_stdout = sys.stdout
-        sys.stdout = open("/dev/null", "w")
-        bob.reap_children()
-        sys.stdout = old_stdout
+    def test_kills(self):
+        """
+        Test that the boss kills components which don't want to stop.
+        """
+        bob = MockBob()
+        killed = []
+        class ImmortalComponent:
+            """
+            An immortal component. It does not stop when it is told so
+            (anyway it is not told so). It does not die if it is killed
+            the first time. It dies only when killed forcefully.
+            """
+            def kill(self, forceful=False):
+                killed.append(forceful)
+                if forceful:
+                    bob.components = {}
+            def pid(self):
+                return 1
+            def name(self):
+                return "Immortal"
+        bob.components = {}
+        bob.register_process(1, ImmortalComponent())
+
+        # While at it, we check the configurator shutdown is actually called
+        orig = bob._component_configurator.shutdown
+        bob._component_configurator.shutdown = self.__nullary_hook
+        self.__called = False
+
+        bob.shutdown()
+
+        self.assertEqual([False, True], killed)
+        self.assertTrue(self.__called)
+
+        bob._component_configurator.shutdown = orig
+
+    def test_component_shutdown(self):
+        """
+        Test the component_shutdown sets all variables accordingly.
+        """
+        bob = MockBob()
+        self.assertRaises(Exception, bob.component_shutdown, 1)
+        self.assertEqual(1, bob.exitcode)
+        bob._BoB__started = True
+        bob.component_shutdown(2)
+        self.assertEqual(2, bob.exitcode)
         self.assertFalse(bob.runnable)
 
+    def test_init_config(self):
+        """
+        Test initial configuration is loaded.
+        """
+        bob = MockBob()
+        # Start it
+        bob._component_configurator.reconfigure = self.__unary_hook
+        # We need to return the original read_bind10_config
+        bob._read_bind10_config = lambda: BoB._read_bind10_config(bob)
+        # And provide a session to read the data from
+        class CC:
+            pass
+        bob.ccs = CC()
+        bob.ccs.get_full_config = lambda: {'components': self.__compconfig}
+        bob.start_all_components()
+        self.__check_extended(self.__param)
+
 if __name__ == '__main__':
+    # store os.environ for test_unchanged_environment
+    original_os_environ = copy.deepcopy(os.environ)
     isc.log.resetUnitTestRootLogger()
     unittest.main()
diff --git a/src/bin/bindctl/Makefile.am b/src/bin/bindctl/Makefile.am
index cd8bcb3..700f26e 100644
--- a/src/bin/bindctl/Makefile.am
+++ b/src/bin/bindctl/Makefile.am
@@ -5,6 +5,8 @@ man_MANS = bindctl.1
 
 EXTRA_DIST = $(man_MANS) bindctl.xml
 
+noinst_SCRIPTS = run_bindctl.sh
+
 python_PYTHON = __init__.py bindcmd.py cmdparse.py exception.py moduleinfo.py \
 		mycollections.py
 pythondir = $(pyexecdir)/bindctl
diff --git a/src/bin/bindctl/bindcmd.py b/src/bin/bindctl/bindcmd.py
index 0bfcda5..b67bc4b 100644
--- a/src/bin/bindctl/bindcmd.py
+++ b/src/bin/bindctl/bindcmd.py
@@ -46,6 +46,16 @@ except ImportError:
 # if we have readline support, use that, otherwise use normal stdio
 try:
     import readline
+    # This is a fix for the problem described in
+    # http://bind10.isc.org/ticket/1345
+    # If '-' is seen as a word-boundary, the final completion-step
+    # (as handled by the cmd module, and hence outside our reach) can
+    # mistakenly add data twice, resulting in wrong completion results
+    # The solution is to remove it.
+    delims = readline.get_completer_delims()
+    delims = delims.replace('-', '')
+    readline.set_completer_delims(delims)
+
     my_readline = readline.get_line_buffer
 except ImportError:
     my_readline = sys.stdin.readline
@@ -61,21 +71,21 @@ Type \"<module_name> <command_name> help\" for help on the specific command.
 \nAvailable module names: """
 
 class ValidatedHTTPSConnection(http.client.HTTPSConnection):
-    '''Overrides HTTPSConnection to support certification 
+    '''Overrides HTTPSConnection to support certification
     validation. '''
     def __init__(self, host, ca_certs):
         http.client.HTTPSConnection.__init__(self, host)
         self.ca_certs = ca_certs
 
     def connect(self):
-        ''' Overrides the connect() so that we do 
+        ''' Overrides the connect() so that we do
         certificate validation. '''
         sock = socket.create_connection((self.host, self.port),
                                         self.timeout)
         if self._tunnel_host:
             self.sock = sock
             self._tunnel()
-       
+
         req_cert = ssl.CERT_NONE
         if self.ca_certs:
             req_cert = ssl.CERT_REQUIRED
@@ -85,7 +95,7 @@ class ValidatedHTTPSConnection(http.client.HTTPSConnection):
                                     ca_certs=self.ca_certs)
 
 class BindCmdInterpreter(Cmd):
-    """simple bindctl example."""    
+    """simple bindctl example."""
 
     def __init__(self, server_port='localhost:8080', pem_file=None,
                  csv_file_dir=None):
@@ -118,29 +128,33 @@ class BindCmdInterpreter(Cmd):
                                       socket.gethostname())).encode())
         digest = session_id.hexdigest()
         return digest
-    
+
     def run(self):
         '''Parse commands from user and send them to cmdctl. '''
         try:
             if not self.login_to_cmdctl():
-                return
+                return 1
 
             self.cmdloop()
             print('\nExit from bindctl')
+            return 0
         except FailToLogin as err:
             # error already printed when this was raised, ignoring
-            pass
+            return 1
         except KeyboardInterrupt:
             print('\nExit from bindctl')
+            return 0
         except socket.error as err:
             print('Failed to send request, the connection is closed')
+            return 1
         except http.client.CannotSendRequest:
             print('Can not send request, the connection is busy')
+            return 1
 
     def _get_saved_user_info(self, dir, file_name):
-        ''' Read all the available username and password pairs saved in 
+        ''' Read all the available username and password pairs saved in
         file(path is "dir + file_name"), Return value is one list of elements
-        ['name', 'password'], If get information failed, empty list will be 
+        ['name', 'password'], If get information failed, empty list will be
         returned.'''
         if (not dir) or (not os.path.exists(dir)):
             return []
@@ -166,7 +180,7 @@ class BindCmdInterpreter(Cmd):
             if not os.path.exists(dir):
                 os.mkdir(dir, 0o700)
 
-            csvfilepath = dir + file_name 
+            csvfilepath = dir + file_name
             csvfile = open(csvfilepath, 'w')
             os.chmod(csvfilepath, 0o600)
             writer = csv.writer(csvfile)
@@ -180,7 +194,7 @@ class BindCmdInterpreter(Cmd):
         return True
 
     def login_to_cmdctl(self):
-        '''Login to cmdctl with the username and password inputted 
+        '''Login to cmdctl with the username and password inputted
         from user. After the login is sucessful, the username and
         password will be saved in 'default_user.csv', when run the next
         time, username and password saved in 'default_user.csv' will be
@@ -246,14 +260,14 @@ class BindCmdInterpreter(Cmd):
             if self.login_to_cmdctl():
                 # successful, so try send again
                 status, reply_msg = self._send_message(url, body)
-            
+
         if reply_msg:
             return json.loads(reply_msg.decode())
         else:
             return {}
-       
 
-    def send_POST(self, url, post_param = None): 
+
+    def send_POST(self, url, post_param = None):
         '''Send POST request to cmdctl, session id is send with the name
         'cookie' in header.
         Format: /module_name/command_name
@@ -312,12 +326,12 @@ class BindCmdInterpreter(Cmd):
     def _validate_cmd(self, cmd):
         '''validate the parameters and merge some parameters together,
         merge algorithm is based on the command line syntax, later, if
-        a better command line syntax come out, this function should be 
-        updated first.        
+        a better command line syntax come out, this function should be
+        updated first.
         '''
         if not cmd.module in self.modules:
             raise CmdUnknownModuleSyntaxError(cmd.module)
-        
+
         module_info = self.modules[cmd.module]
         if not module_info.has_command_with_name(cmd.command):
             raise CmdUnknownCmdSyntaxError(cmd.module, cmd.command)
@@ -325,17 +339,17 @@ class BindCmdInterpreter(Cmd):
         command_info = module_info.get_command_with_name(cmd.command)
         manda_params = command_info.get_mandatory_param_names()
         all_params = command_info.get_param_names()
-        
+
         # If help is entered, don't do further parameter validation.
         for val in cmd.params.keys():
             if val == "help":
                 return
-        
-        params = cmd.params.copy()       
-        if not params and manda_params:            
-            raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])            
+
+        params = cmd.params.copy()
+        if not params and manda_params:
+            raise CmdMissParamSyntaxError(cmd.module, cmd.command, manda_params[0])
         elif params and not all_params:
-            raise CmdUnknownParamSyntaxError(cmd.module, cmd.command, 
+            raise CmdUnknownParamSyntaxError(cmd.module, cmd.command,
                                              list(params.keys())[0])
         elif params:
             param_name = None
@@ -366,7 +380,7 @@ class BindCmdInterpreter(Cmd):
                         param_name = command_info.get_param_name_by_position(name, param_count)
                         cmd.params[param_name] = cmd.params[name]
                         del cmd.params[name]
-                        
+
                 elif not name in all_params:
                     raise CmdUnknownParamSyntaxError(cmd.module, cmd.command, name)
 
@@ -375,7 +389,7 @@ class BindCmdInterpreter(Cmd):
                 if not name in params and not param_nr in params:
                     raise CmdMissParamSyntaxError(cmd.module, cmd.command, name)
                 param_nr += 1
-        
+
         # Convert parameter value according parameter spec file.
         # Ignore check for commands belongs to module 'config'
         if cmd.module != CONFIG_MODULE_NAME:
@@ -384,9 +398,9 @@ class BindCmdInterpreter(Cmd):
                 try:
                     cmd.params[param_name] = isc.config.config_data.convert_type(param_spec, cmd.params[param_name])
                 except isc.cc.data.DataTypeError as e:
-                    raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n' 
+                    raise isc.cc.data.DataTypeError('Invalid parameter value for \"%s\", the type should be \"%s\" \n'
                                                      % (param_name, param_spec['item_type']) + str(e))
-    
+
     def _handle_cmd(self, cmd):
         '''Handle a command entered by the user'''
         if cmd.command == "help" or ("help" in cmd.params.keys()):
@@ -398,6 +412,8 @@ class BindCmdInterpreter(Cmd):
                 print("Error: " + str(dte))
             except isc.cc.data.DataNotFoundError as dnfe:
                 print("Error: " + str(dnfe))
+            except isc.cc.data.DataAlreadyPresentError as dape:
+                print("Error: " + str(dape))
             except KeyError as ke:
                 print("Error: missing " + str(ke))
         else:
@@ -406,7 +422,7 @@ class BindCmdInterpreter(Cmd):
     def add_module_info(self, module_info):
         '''Add the information about one module'''
         self.modules[module_info.name] = module_info
-        
+
     def get_module_names(self):
         '''Return the names of all known modules'''
         return list(self.modules.keys())
@@ -438,15 +454,15 @@ class BindCmdInterpreter(Cmd):
                     subsequent_indent="    " +
                     " " * CONST_BINDCTL_HELP_INDENT_WIDTH,
                     width=70))
-    
+
     def onecmd(self, line):
         if line == 'EOF' or line.lower() == "quit":
             self.conn.close()
             return True
-            
+
         if line == 'h':
             line = 'help'
-        
+
         Cmd.onecmd(self, line)
 
     def remove_prefix(self, list, prefix):
@@ -474,7 +490,7 @@ class BindCmdInterpreter(Cmd):
                 cmd = BindCmdParse(cur_line)
                 if not cmd.params and text:
                     hints = self._get_command_startswith(cmd.module, text)
-                else:                       
+                else:
                     hints = self._get_param_startswith(cmd.module, cmd.command,
                                                        text)
                     if cmd.module == CONFIG_MODULE_NAME:
@@ -490,8 +506,8 @@ class BindCmdInterpreter(Cmd):
 
             except CmdMissCommandNameFormatError as e:
                 if not text.strip(): # command name is empty
-                    hints = self.modules[e.module].get_command_names()                    
-                else: 
+                    hints = self.modules[e.module].get_command_names()
+                else:
                     hints = self._get_module_startswith(text)
 
             except CmdCommandNameFormatError as e:
@@ -505,44 +521,43 @@ class BindCmdInterpreter(Cmd):
                 hints = []
 
             self.hint = hints
-            #self._append_space_to_hint()
 
         if state < len(self.hint):
             return self.hint[state]
         else:
             return None
-            
 
-    def _get_module_startswith(self, text):       
+
+    def _get_module_startswith(self, text):
         return [module
-                for module in self.modules 
+                for module in self.modules
                 if module.startswith(text)]
 
 
     def _get_command_startswith(self, module, text):
-        if module in self.modules:            
+        if module in self.modules:
             return [command
-                    for command in self.modules[module].get_command_names() 
+                    for command in self.modules[module].get_command_names()
                     if command.startswith(text)]
-        
-        return []                    
-                        
 
-    def _get_param_startswith(self, module, command, text):        
+        return []
+
+
+    def _get_param_startswith(self, module, command, text):
         if module in self.modules:
-            module_info = self.modules[module]            
-            if command in module_info.get_command_names():                
+            module_info = self.modules[module]
+            if command in module_info.get_command_names():
                 cmd_info = module_info.get_command_with_name(command)
-                params = cmd_info.get_param_names() 
+                params = cmd_info.get_param_names()
                 hint = []
-                if text:    
+                if text:
                     hint = [val for val in params if val.startswith(text)]
                 else:
                     hint = list(params)
-                
+
                 if len(hint) == 1 and hint[0] != "help":
-                    hint[0] = hint[0] + " ="    
-                
+                    hint[0] = hint[0] + " ="
+
                 return hint
 
         return []
@@ -559,24 +574,24 @@ class BindCmdInterpreter(Cmd):
             self._print_correct_usage(err)
         except isc.cc.data.DataTypeError as err:
             print("Error! ", err)
-            
-    def _print_correct_usage(self, ept):        
+
+    def _print_correct_usage(self, ept):
         if isinstance(ept, CmdUnknownModuleSyntaxError):
             self.do_help(None)
-            
+
         elif isinstance(ept, CmdUnknownCmdSyntaxError):
             self.modules[ept.module].module_help()
-            
+
         elif isinstance(ept, CmdMissParamSyntaxError) or \
              isinstance(ept, CmdUnknownParamSyntaxError):
              self.modules[ept.module].command_help(ept.command)
-                 
-                
+
+
     def _append_space_to_hint(self):
         """Append one space at the end of complete hint."""
         self.hint = [(val + " ") for val in self.hint]
-            
-            
+
+
     def _handle_help(self, cmd):
         if cmd.command == "help":
             self.modules[cmd.module].module_help()
@@ -634,7 +649,15 @@ class BindCmdInterpreter(Cmd):
                     # we have more data to show
                     line += "/"
                 else:
-                    line += "\t" + json.dumps(value_map['value'])
+                    # if type is named_set, don't print value if None
+                    # (it is either {} meaning empty, or None, meaning
+                    # there actually is data, but not to be shown with
+                    # the current command
+                    if value_map['type'] == 'named_set' and\
+                       value_map['value'] is None:
+                        line += "/\t"
+                    else:
+                        line += "\t" + json.dumps(value_map['value'])
                 line += "\t" + value_map['type']
                 line += "\t"
                 if value_map['default']:
@@ -649,10 +672,9 @@ class BindCmdInterpreter(Cmd):
                 data, default = self.config_data.get_value(identifier)
                 print(json.dumps(data))
         elif cmd.command == "add":
-            if 'value' in cmd.params:
-                self.config_data.add_value(identifier, cmd.params['value'])
-            else:
-                self.config_data.add_value(identifier)
+            self.config_data.add_value(identifier,
+                                       cmd.params.get('value_or_name'),
+                                       cmd.params.get('value_for_set'))
         elif cmd.command == "remove":
             if 'value' in cmd.params:
                 self.config_data.remove_value(identifier, cmd.params['value'])
@@ -679,7 +701,7 @@ class BindCmdInterpreter(Cmd):
             except isc.config.ModuleCCSessionError as mcse:
                 print(str(mcse))
         elif cmd.command == "diff":
-            print(self.config_data.get_local_changes());
+            print(self.config_data.get_local_changes())
         elif cmd.command == "go":
             self.go(identifier)
 
diff --git a/src/bin/bindctl/bindctl_main.py.in b/src/bin/bindctl/bindctl_main.py.in
index 01307e9..58c03eb 100755
--- a/src/bin/bindctl/bindctl_main.py.in
+++ b/src/bin/bindctl/bindctl_main.py.in
@@ -50,17 +50,28 @@ def prepare_config_commands(tool):
     cmd.add_param(param)
     module.add_command(cmd)
 
-    cmd = CommandInfo(name = "add", desc = "Add an entry to configuration list. If no value is given, a default value is added.")
+    cmd = CommandInfo(name = "add", desc =
+        "Add an entry to configuration list or a named set. "
+        "When adding to a list, the command has one optional argument, "
+        "a value to add to the list. The value must be in correct JSON "
+        "and complete. When adding to a named set, it has one "
+        "mandatory parameter (the name to add), and an optional "
+        "parameter value, similar to when adding to a list. "
+        "In either case, when no value is given, an entry will be "
+        "constructed with default values.")
     param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
-    param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to add to the list. It must be in correct JSON format and complete.")
+    param = ParamInfo(name = "value_or_name", type = "string", optional=True, desc = "Specifies a value to add to the list, or the name when adding to a named set. It must be in correct JSON format and complete.")
+    cmd.add_param(param)
+    module.add_command(cmd)
+    param = ParamInfo(name = "value_for_set", type = "string", optional=True, desc = "Specifies an optional value to add to the named map. It must be in correct JSON format and complete.")
     cmd.add_param(param)
     module.add_command(cmd)
 
-    cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list.")
+    cmd = CommandInfo(name = "remove", desc = "Remove entry from configuration list or named set.")
     param = ParamInfo(name = "identifier", type = "string", optional=True, desc = DEFAULT_IDENTIFIER_DESC)
     cmd.add_param(param)
-    param = ParamInfo(name = "value", type = "string", optional=True, desc = "Specifies a value to remove from the list. It must be in correct JSON format and complete.")
+    param = ParamInfo(name = "value", type = "string", optional=True, desc = "When identifier is a list, specifies a value to remove from the list. It must be in correct JSON format and complete. When it is a named set, specifies the name to remove.")
     cmd.add_param(param)
     module.add_command(cmd)
 
@@ -135,4 +146,5 @@ if __name__ == '__main__':
     tool = BindCmdInterpreter(server_addr, pem_file=options.cert_chain,
                               csv_file_dir=options.csv_file_dir)
     prepare_config_commands(tool)
-    tool.run()
+    result = tool.run()
+    sys.exit(result)
diff --git a/src/bin/bindctl/run_bindctl.sh.in b/src/bin/bindctl/run_bindctl.sh.in
index 8f6ba59..f4cc40c 100755
--- a/src/bin/bindctl/run_bindctl.sh.in
+++ b/src/bin/bindctl/run_bindctl.sh.in
@@ -20,14 +20,14 @@ export PYTHON_EXEC
 
 BINDCTL_PATH=@abs_top_builddir@/src/bin/bindctl
 
-PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_srcdir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/bin:@abs_top_srcdir@/src/lib/python
 export PYTHONPATH
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 
diff --git a/src/bin/bindctl/tests/Makefile.am b/src/bin/bindctl/tests/Makefile.am
index 891d413..3d08a17 100644
--- a/src/bin/bindctl/tests/Makefile.am
+++ b/src/bin/bindctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin  \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/bindctl:$(abs_top_srcdir)/src/bin  \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/bindctl/tests/bindctl_test.py b/src/bin/bindctl/tests/bindctl_test.py
index 0635b32..cef35dc 100644
--- a/src/bin/bindctl/tests/bindctl_test.py
+++ b/src/bin/bindctl/tests/bindctl_test.py
@@ -31,14 +31,14 @@ from bindctl_main import set_bindctl_options
 from bindctl import cmdparse
 from bindctl import bindcmd
 from bindctl.moduleinfo import *
-from bindctl.exception import *    
+from bindctl.exception import *
 try:
     from collections import OrderedDict
 except ImportError:
     from mycollections import OrderedDict
 
 class TestCmdLex(unittest.TestCase):
-    
+
     def my_assert_raise(self, exception_type, cmd_line):
         self.assertRaises(exception_type, cmdparse.BindCmdParse, cmd_line)
 
@@ -48,13 +48,13 @@ class TestCmdLex(unittest.TestCase):
         assert cmd.module == "zone"
         assert cmd.command == "add"
         self.assertEqual(len(cmd.params), 0)
-        
-    
+
+
     def testCommandWithParameters(self):
         lines = {"zone add zone_name = cnnic.cn, file = cnnic.cn.file master=1.1.1.1",
                  "zone add zone_name = \"cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1  ",
                  "zone add zone_name = 'cnnic.cn\", file ='cnnic.cn.file' master=1.1.1.1, " }
-        
+
         for cmd_line in lines:
             cmd = cmdparse.BindCmdParse(cmd_line)
             assert cmd.module == "zone"
@@ -75,7 +75,7 @@ class TestCmdLex(unittest.TestCase):
         cmd = cmdparse.BindCmdParse('zone cmd name = 1\"\'34**&2 ,value=  44\"\'\"')
         self.assertEqual(cmd.params['name'], '1\"\'34**&2')
         self.assertEqual(cmd.params['value'], '44\"\'\"')
-            
+
         cmd = cmdparse.BindCmdParse('zone cmd name =  1\'34**&2value=44\"\'\" value = \"==============\'')
         self.assertEqual(cmd.params['name'], '1\'34**&2value=44\"\'\"')
         self.assertEqual(cmd.params['value'], '==============')
@@ -83,34 +83,34 @@ class TestCmdLex(unittest.TestCase):
         cmd = cmdparse.BindCmdParse('zone cmd name =    \"1234, 567890 \" value ==&*/')
         self.assertEqual(cmd.params['name'], '1234, 567890 ')
         self.assertEqual(cmd.params['value'], '=&*/')
-            
+
     def testCommandWithListParam(self):
         cmd = cmdparse.BindCmdParse("zone set zone_name='cnnic.cn', master='1.1.1.1, 2.2.2.2'")
-        assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'            
-        
+        assert cmd.params["master"] == '1.1.1.1, 2.2.2.2'
+
     def testCommandWithHelpParam(self):
         cmd = cmdparse.BindCmdParse("zone add help")
         assert cmd.params["help"] == "help"
-        
+
         cmd = cmdparse.BindCmdParse("zone add help *&)&)*&&$#$^%")
         assert cmd.params["help"] == "help"
         self.assertEqual(len(cmd.params), 1)
-        
+
 
     def testCmdModuleNameFormatError(self):
         self.my_assert_raise(CmdModuleNameFormatError, "zone=good")
-        self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")        
-        self.my_assert_raise(CmdModuleNameFormatError, "")        
+        self.my_assert_raise(CmdModuleNameFormatError, "zo/ne")
+        self.my_assert_raise(CmdModuleNameFormatError, "")
         self.my_assert_raise(CmdModuleNameFormatError, "=zone")
-        self.my_assert_raise(CmdModuleNameFormatError, "zone,")        
-        
-        
+        self.my_assert_raise(CmdModuleNameFormatError, "zone,")
+
+
     def testCmdMissCommandNameFormatError(self):
         self.my_assert_raise(CmdMissCommandNameFormatError, "zone")
         self.my_assert_raise(CmdMissCommandNameFormatError, "zone ")
         self.my_assert_raise(CmdMissCommandNameFormatError, "help ")
-        
-             
+
+
     def testCmdCommandNameFormatError(self):
         self.my_assert_raise(CmdCommandNameFormatError, "zone =d")
         self.my_assert_raise(CmdCommandNameFormatError, "zone z=d")
@@ -119,11 +119,11 @@ class TestCmdLex(unittest.TestCase):
         self.my_assert_raise(CmdCommandNameFormatError, "zone zdd/ \"")
 
 class TestCmdSyntax(unittest.TestCase):
-    
+
     def _create_bindcmd(self):
         """Create one bindcmd"""
-        
-        tool = bindcmd.BindCmdInterpreter()        
+
+        tool = bindcmd.BindCmdInterpreter()
         string_spec = { 'item_type' : 'string',
                        'item_optional' : False,
                        'item_default' : ''}
@@ -135,40 +135,40 @@ class TestCmdSyntax(unittest.TestCase):
         load_cmd = CommandInfo(name = "load")
         load_cmd.add_param(zone_file_param)
         load_cmd.add_param(zone_name)
-        
-        param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)                                 
-        param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)                                 
-        param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)                                           
+
+        param_master = ParamInfo(name = "master", optional = True, param_spec = string_spec)
+        param_master = ParamInfo(name = "port", optional = True, param_spec = int_spec)
+        param_allow_update = ParamInfo(name = "allow_update", optional = True, param_spec = string_spec)
         set_cmd = CommandInfo(name = "set")
         set_cmd.add_param(param_master)
         set_cmd.add_param(param_allow_update)
         set_cmd.add_param(zone_name)
-        
-        reload_all_cmd = CommandInfo(name = "reload_all")        
-        
-        zone_module = ModuleInfo(name = "zone")                             
+
+        reload_all_cmd = CommandInfo(name = "reload_all")
+
+        zone_module = ModuleInfo(name = "zone")
         zone_module.add_command(load_cmd)
         zone_module.add_command(set_cmd)
         zone_module.add_command(reload_all_cmd)
-        
+
         tool.add_module_info(zone_module)
         return tool
-        
-        
+
+
     def setUp(self):
         self.bindcmd = self._create_bindcmd()
-        
-        
+
+
     def no_assert_raise(self, cmd_line):
         cmd = cmdparse.BindCmdParse(cmd_line)
-        self.bindcmd._validate_cmd(cmd) 
-        
-        
+        self.bindcmd._validate_cmd(cmd)
+
+
     def my_assert_raise(self, exception_type, cmd_line):
         cmd = cmdparse.BindCmdParse(cmd_line)
-        self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)  
-        
-        
+        self.assertRaises(exception_type, self.bindcmd._validate_cmd, cmd)
+
+
     def testValidateSuccess(self):
         self.no_assert_raise("zone load zone_file='cn' zone_name='cn'")
         self.no_assert_raise("zone load zone_file='cn', zone_name='cn', ")
@@ -178,27 +178,27 @@ class TestCmdSyntax(unittest.TestCase):
         self.no_assert_raise("zone set allow_update='1.1.1.1' zone_name='cn'")
         self.no_assert_raise("zone set zone_name='cn'")
         self.my_assert_raise(isc.cc.data.DataTypeError, "zone set zone_name ='cn', port='cn'")
-        self.no_assert_raise("zone reload_all")        
-        
-    
+        self.no_assert_raise("zone reload_all")
+
+
     def testCmdUnknownModuleSyntaxError(self):
         self.my_assert_raise(CmdUnknownModuleSyntaxError, "zoned d")
         self.my_assert_raise(CmdUnknownModuleSyntaxError, "dd dd  ")
-        
-              
+
+
     def testCmdUnknownCmdSyntaxError(self):
         self.my_assert_raise(CmdUnknownCmdSyntaxError, "zone dd")
-        
+
     def testCmdMissParamSyntaxError(self):
         self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_file='cn'")
         self.my_assert_raise(CmdMissParamSyntaxError, "zone load zone_name='cn'")
         self.my_assert_raise(CmdMissParamSyntaxError, "zone set allow_update='1.1.1.1'")
         self.my_assert_raise(CmdMissParamSyntaxError, "zone set ")
-        
+
     def testCmdUnknownParamSyntaxError(self):
         self.my_assert_raise(CmdUnknownParamSyntaxError, "zone load zone_d='cn'")
-        self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")  
-       
+        self.my_assert_raise(CmdUnknownParamSyntaxError, "zone reload_all zone_name = 'cn'")
+
 class TestModuleInfo(unittest.TestCase):
 
     def test_get_param_name_by_position(self):
@@ -212,36 +212,36 @@ class TestModuleInfo(unittest.TestCase):
         self.assertEqual('sex', cmd.get_param_name_by_position(2, 3))
         self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
         self.assertEqual('data', cmd.get_param_name_by_position(2, 4))
-        
+
         self.assertRaises(KeyError, cmd.get_param_name_by_position, 4, 4)
 
 
-    
+
 class TestNameSequence(unittest.TestCase):
     """
     Test if the module/command/parameters is saved in the order creation
     """
-    
+
     def _create_bindcmd(self):
-        """Create one bindcmd"""     
-        
+        """Create one bindcmd"""
+
         self._cmd = CommandInfo(name = "load")
         self.module = ModuleInfo(name = "zone")
-        self.tool = bindcmd.BindCmdInterpreter()        
+        self.tool = bindcmd.BindCmdInterpreter()
         for random_str in self.random_names:
             self._cmd.add_param(ParamInfo(name = random_str))
             self.module.add_command(CommandInfo(name = random_str))
-            self.tool.add_module_info(ModuleInfo(name = random_str))  
-        
+            self.tool.add_module_info(ModuleInfo(name = random_str))
+
     def setUp(self):
         self.random_names = ['1erdfeDDWsd', '3fe', '2009erd', 'Fe231', 'tere142', 'rei8WD']
         self._create_bindcmd()
-        
-    def testSequence(self):        
+
+    def testSequence(self):
         param_names = self._cmd.get_param_names()
         cmd_names = self.module.get_command_names()
         module_names = self.tool.get_module_names()
-        
+
         i = 0
         while i < len(self.random_names):
             assert self.random_names[i] == param_names[i+1]
@@ -342,7 +342,7 @@ class TestConfigCommands(unittest.TestCase):
         # validate log message for socket.err
         socket_err_output = io.StringIO()
         sys.stdout = socket_err_output
-        self.assertRaises(None, self.tool.run())
+        self.assertEqual(1, self.tool.run())
         self.assertEqual("Failed to send request, the connection is closed\n",
                          socket_err_output.getvalue())
         socket_err_output.close()
@@ -350,7 +350,7 @@ class TestConfigCommands(unittest.TestCase):
         # validate log message for http.client.CannotSendRequest
         cannot_send_output = io.StringIO()
         sys.stdout = cannot_send_output
-        self.assertRaises(None, self.tool.run())
+        self.assertEqual(1, self.tool.run())
         self.assertEqual("Can not send request, the connection is busy\n",
                          cannot_send_output.getvalue())
         cannot_send_output.close()
@@ -472,4 +472,4 @@ class TestCommandLineOptions(unittest.TestCase):
 
 if __name__== "__main__":
     unittest.main()
-    
+
diff --git a/src/bin/cfgmgr/b10-cfgmgr.py.in b/src/bin/cfgmgr/b10-cfgmgr.py.in
index 8befbdf..2ccc430 100755
--- a/src/bin/cfgmgr/b10-cfgmgr.py.in
+++ b/src/bin/cfgmgr/b10-cfgmgr.py.in
@@ -28,7 +28,7 @@ import os.path
 import isc.log
 isc.log.init("b10-cfgmgr")
 from isc.config.cfgmgr import ConfigManager, ConfigManagerDataReadError, logger
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
 
 isc.util.process.rename()
 
diff --git a/src/bin/cfgmgr/plugins/Makefile.am b/src/bin/cfgmgr/plugins/Makefile.am
index 529a4ed..5a4cfef 100644
--- a/src/bin/cfgmgr/plugins/Makefile.am
+++ b/src/bin/cfgmgr/plugins/Makefile.am
@@ -1,11 +1,14 @@
 SUBDIRS = tests
-EXTRA_DIST = README tsig_keys.py tsig_keys.spec
-EXTRA_DIST += logging.spec b10logging.py
+
+EXTRA_DIST = README logging.spec tsig_keys.spec
 
 config_plugindir = @prefix@/share/@PACKAGE@/config_plugins
-config_plugin_DATA = tsig_keys.py tsig_keys.spec
-config_plugin_DATA += b10logging.py logging.spec
+config_plugin_DATA = logging.spec tsig_keys.spec
+
+python_PYTHON = b10logging.py tsig_keys.py
+pythondir = $(config_plugindir)
 
+CLEANFILES = b10logging.pyc tsig_keys.pyc
 CLEANDIRS = __pycache__
 
 clean-local:
diff --git a/src/bin/cfgmgr/plugins/tests/Makefile.am b/src/bin/cfgmgr/plugins/tests/Makefile.am
index 07b7a85..ffea2d7 100644
--- a/src/bin/cfgmgr/plugins/tests/Makefile.am
+++ b/src/bin/cfgmgr/plugins/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,8 +19,8 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
+	B10_TEST_PLUGIN_DIR=$(abs_srcdir)/..:$(abs_builddir)/.. \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/cfgmgr/tests/Makefile.am b/src/bin/cfgmgr/tests/Makefile.am
index bd67241..a2e43ff 100644
--- a/src/bin/cfgmgr/tests/Makefile.am
+++ b/src/bin/cfgmgr/tests/Makefile.am
@@ -1,13 +1,14 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = b10-cfgmgr_test.py
 
-EXTRA_DIST = $(PYTESTS) testdata/plugins/testplugin.py
+noinst_SCRIPTS = $(PYTESTS)
+EXTRA_DIST = testdata/plugins/testplugin.py
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,9 +20,10 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env TESTDATA_PATH=$(abs_srcdir)/testdata \
+	chmod +x $(abs_builddir)/$$pytest ; \
+	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cfgmgr:$(abs_top_builddir)/src/lib/python/isc/config \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
 
diff --git a/src/bin/cmdctl/Makefile.am b/src/bin/cmdctl/Makefile.am
index fcd23f8..e302fa6 100644
--- a/src/bin/cmdctl/Makefile.am
+++ b/src/bin/cmdctl/Makefile.am
@@ -3,7 +3,9 @@ SUBDIRS = . tests
 pkglibexecdir = $(libexecdir)/@PACKAGE@
 
 pkglibexec_SCRIPTS = b10-cmdctl
-pyexec_DATA = cmdctl_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
 
 b10_cmdctldir = $(pkgdatadir)
 
@@ -19,7 +21,9 @@ b10_cmdctl_DATA += cmdctl.spec
 
 EXTRA_DIST = $(CMDCTL_CONFIGURATIONS)
 
-CLEANFILES=	b10-cmdctl cmdctl.pyc cmdctl.spec cmdctl_messages.py cmdctl_messages.pyc
+CLEANFILES= b10-cmdctl cmdctl.pyc cmdctl.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.pyc
 
 man_MANS = b10-cmdctl.8
 EXTRA_DIST += $(man_MANS) b10-cmdctl.xml cmdctl_messages.mes
@@ -34,11 +38,12 @@ endif
 cmdctl.spec: cmdctl.spec.pre
 	$(SED) -e "s|@@SYSCONFDIR@@|$(sysconfdir)|" cmdctl.spec.pre >$@
 
-cmdctl_messages.py: cmdctl_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/cmdctl/cmdctl_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py : cmdctl_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cmdctl_messages.mes
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-cmdctl: cmdctl.py cmdctl_messages.py
+b10-cmdctl: cmdctl.py $(PYTHON_LOGMSGPKG_DIR)/work/cmdctl_messages.py
 	$(SED) "s|@@PYTHONPATH@@|@pyexecdir@|" cmdctl.py >$@
 	chmod a+x $@
 
diff --git a/src/bin/cmdctl/cmdctl.py.in b/src/bin/cmdctl/cmdctl.py.in
index 2f89894..ff221db 100755
--- a/src/bin/cmdctl/cmdctl.py.in
+++ b/src/bin/cmdctl/cmdctl.py.in
@@ -17,12 +17,12 @@
 
 ''' cmdctl module is the configuration entry point for all commands from bindctl
 or some other web tools client of bind10. cmdctl is pure https server which provi-
-des RESTful API. When command client connecting with cmdctl, it should first login 
-with legal username and password. 
-    When cmdctl starting up, it will collect command specification and 
+des RESTful API. When command client connecting with cmdctl, it should first login
+with legal username and password.
+    When cmdctl starting up, it will collect command specification and
 configuration specification/data of other available modules from configmanager, then
 wait for receiving request from client, parse the request and resend the request to
-the proper module. When getting the request result from the module, send back the 
+the proper module. When getting the request result from the module, send back the
 resut to client.
 '''
 
@@ -47,19 +47,14 @@ import isc.net.parse
 from optparse import OptionParser, OptionValueError
 from hashlib import sha1
 from isc.util import socketserver_mixin
-from cmdctl_messages import *
-
-# TODO: these debug-levels are hard-coded here; we are planning on
-# creating a general set of debug levels, see ticket #1074. When done,
-# we should remove these values and use the general ones in the
-# logger.debug calls
-
-# Debug level for communication with BIND10
-DBG_CMDCTL_MESSAGING = 30
+from isc.log_messages.cmdctl_messages import *
 
 isc.log.init("b10-cmdctl")
 logger = isc.log.Logger("cmdctl")
 
+# Debug level for communication with BIND10
+DBG_CMDCTL_MESSAGING = logger.DBGLVL_COMMAND
+
 try:
     import threading
 except ImportError:
@@ -86,16 +81,16 @@ SPECFILE_LOCATION = SPECFILE_PATH + os.sep + "cmdctl.spec"
 
 class CmdctlException(Exception):
     pass
-       
+
 class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
     '''https connection request handler.
     Currently only GET and POST are supported.  '''
     def do_GET(self):
-        '''The client should send its session id in header with 
+        '''The client should send its session id in header with
         the name 'cookie'
         '''
         self.session_id = self.headers.get('cookie')
-        rcode, reply = http.client.OK, []        
+        rcode, reply = http.client.OK, []
         if self._is_session_valid():
             if self._is_user_logged_in():
                 rcode, reply = self._handle_get_request()
@@ -111,16 +106,16 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
     def _handle_get_request(self):
         '''Currently only support the following three url GET request '''
         id, module = self._parse_request_path()
-        return self.server.get_reply_data_for_GET(id, module) 
+        return self.server.get_reply_data_for_GET(id, module)
 
     def _is_session_valid(self):
-        return self.session_id 
+        return self.session_id
 
     def _is_user_logged_in(self):
         login_time = self.server.user_sessions.get(self.session_id)
         if not login_time:
             return False
-        
+
         idle_time = time.time() - login_time
         if idle_time > self.server.idle_timeout:
             return False
@@ -130,7 +125,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
 
     def _parse_request_path(self):
         '''Parse the url, the legal url should like /ldh or /ldh/ldh '''
-        groups = URL_PATTERN.match(self.path) 
+        groups = URL_PATTERN.match(self.path)
         if not groups:
             return (None, None)
         else:
@@ -138,8 +133,8 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
 
     def do_POST(self):
         '''Process POST request. '''
-        '''Process user login and send command to proper module  
-        The client should send its session id in header with 
+        '''Process user login and send command to proper module
+        The client should send its session id in header with
         the name 'cookie'
         '''
         self.session_id = self.headers.get('cookie')
@@ -153,7 +148,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
                 rcode, reply = http.client.UNAUTHORIZED, ["please login"]
         else:
             rcode, reply = http.client.BAD_REQUEST, ["session isn't valid"]
-      
+
         self.send_response(rcode)
         self.end_headers()
         self.wfile.write(json.dumps(reply).encode())
@@ -174,12 +169,12 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         length = self.headers.get('Content-Length')
 
         if not length:
-            return False, ["invalid username or password"]     
+            return False, ["invalid username or password"]
 
         try:
             user_info = json.loads((self.rfile.read(int(length))).decode())
         except:
-            return False, ["invalid username or password"]                
+            return False, ["invalid username or password"]
 
         user_name = user_info.get('username')
         if not user_name:
@@ -198,7 +193,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
             return False, ["username or password error"]
 
         return True, None
-   
+
 
     def _handle_post_request(self):
         '''Handle all the post request from client. '''
@@ -220,7 +215,7 @@ class SecureHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
         if rcode != 0:
             ret = http.client.BAD_REQUEST
         return ret, reply
-    
+
     def log_request(self, code='-', size='-'):
         '''Rewrite the log request function, log nothing.'''
         pass
@@ -244,11 +239,11 @@ class CommandControl():
 
     def _setup_session(self):
         '''Setup the session for receving the commands
-        sent from other modules. There are two sessions 
-        for cmdctl, one(self.module_cc) is used for receiving 
-        commands sent from other modules, another one (self._cc) 
-        is used to send the command from Bindctl or other tools 
-        to proper modules.''' 
+        sent from other modules. There are two sessions
+        for cmdctl, one(self.module_cc) is used for receiving
+        commands sent from other modules, another one (self._cc)
+        is used to send the command from Bindctl or other tools
+        to proper modules.'''
         self._cc = isc.cc.Session()
         self._module_cc = isc.config.ModuleCCSession(SPECFILE_LOCATION,
                                               self.config_handler,
@@ -256,7 +251,7 @@ class CommandControl():
         self._module_name = self._module_cc.get_module_spec().get_module_name()
         self._cmdctl_config_data = self._module_cc.get_full_config()
         self._module_cc.start()
-    
+
     def _accounts_file_check(self, filepath):
         ''' Check whether the accounts file is valid, each row
         should be a list with 3 items.'''
@@ -293,7 +288,7 @@ class CommandControl():
                 errstr = self._accounts_file_check(new_config[key])
             else:
                 errstr = 'unknown config item: ' + key
-            
+
             if errstr != None:
                 logger.error(CMDCTL_BAD_CONFIG_DATA, errstr);
                 return ccsession.create_answer(1, errstr)
@@ -319,7 +314,7 @@ class CommandControl():
                 self.modules_spec[args[0]] = args[1]
 
         elif command == ccsession.COMMAND_SHUTDOWN:
-            #When cmdctl get 'shutdown' command from boss, 
+            #When cmdctl get 'shutdown' command from boss,
             #shutdown the outer httpserver.
             self._httpserver.shutdown()
             self._serving = False
@@ -389,12 +384,12 @@ class CommandControl():
         specs = self.get_modules_spec()
         if module_name not in specs.keys():
             return 1, {'error' : 'unknown module'}
-       
+
         spec_obj = isc.config.module_spec.ModuleSpec(specs[module_name], False)
         errors = []
         if not spec_obj.validate_command(command_name, params, errors):
             return 1, {'error': errors[0]}
-        
+
         return self.send_command(module_name, command_name, params)
 
     def send_command(self, module_name, command_name, params = None):
@@ -405,7 +400,7 @@ class CommandControl():
                      command_name, module_name)
 
         if module_name == self._module_name:
-            # Process the command sent to cmdctl directly. 
+            # Process the command sent to cmdctl directly.
             answer = self.command_handler(command_name, params)
         else:
             msg = ccsession.create_command(command_name, params)
@@ -434,7 +429,7 @@ class CommandControl():
 
         logger.error(CMDCTL_COMMAND_ERROR, command_name, module_name, errstr)
         return 1, {'error': errstr}
-    
+
     def get_cmdctl_config_data(self):
         ''' If running in source code tree, use keyfile, certificate
         and user accounts file in source code. '''
@@ -458,13 +453,15 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
     '''Make the server address can be reused.'''
     allow_reuse_address = True
 
-    def __init__(self, server_address, RequestHandlerClass, 
+    def __init__(self, server_address, RequestHandlerClass,
                  CommandControlClass,
                  idle_timeout = 1200, verbose = False):
         '''idle_timeout: the max idle time for login'''
         socketserver_mixin.NoPollMixIn.__init__(self)
         try:
             http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass)
+            logger.debug(DBG_CMDCTL_MESSAGING, CMDCTL_STARTED,
+                         server_address[0], server_address[1])
         except socket.error as err:
             raise CmdctlException("Error creating server, because: %s \n" % str(err))
 
@@ -477,9 +474,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
         self._accounts_file = None
 
     def _create_user_info(self, accounts_file):
-        '''Read all user's name and its' salt, hashed password 
+        '''Read all user's name and its' salt, hashed password
         from accounts file.'''
-        if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0): 
+        if (self._accounts_file == accounts_file) and (len(self._user_infos) > 0):
             return
 
         with self._lock:
@@ -500,10 +497,10 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
         self._accounts_file = accounts_file
         if len(self._user_infos) == 0:
             logger.error(CMDCTL_NO_USER_ENTRIES_READ)
-         
+
     def get_user_info(self, username):
         '''Get user's salt and hashed string. If the user
-        doesn't exist, return None, or else, the list 
+        doesn't exist, return None, or else, the list
         [salt, hashed password] will be returned.'''
         with self._lock:
             info = self._user_infos.get(username)
@@ -512,9 +509,9 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
     def save_user_session_id(self, session_id):
         ''' Record user's id and login time. '''
         self.user_sessions[session_id] = time.time()
-        
+
     def _check_key_and_cert(self, key, cert):
-        # TODO, check the content of key/certificate file 
+        # TODO, check the content of key/certificate file
         if not os.path.exists(key):
             raise CmdctlException("key file '%s' doesn't exist " % key)
 
@@ -529,7 +526,7 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
                                       certfile = cert,
                                       keyfile = key,
                                       ssl_version = ssl.PROTOCOL_SSLv23)
-            return ssl_sock 
+            return ssl_sock
         except (ssl.SSLError, CmdctlException) as err :
             logger.info(CMDCTL_SSL_SETUP_FAILURE_USER_DENIED, err)
             self.close_request(sock)
@@ -546,18 +543,18 @@ class SecureHTTPServer(socketserver_mixin.NoPollMixIn,
 
     def get_reply_data_for_GET(self, id, module):
         '''Currently only support the following three url GET request '''
-        rcode, reply = http.client.NO_CONTENT, []        
+        rcode, reply = http.client.NO_CONTENT, []
         if not module:
             if id == CONFIG_DATA_URL:
                 rcode, reply = http.client.OK, self.cmdctl.get_config_data()
             elif id == MODULE_SPEC_URL:
                 rcode, reply = http.client.OK, self.cmdctl.get_modules_spec()
-        
-        return rcode, reply 
+
+        return rcode, reply
 
     def send_command_to_module(self, module_name, command_name, params):
         return self.cmdctl.send_command_with_check(module_name, command_name, params)
-   
+
 httpd = None
 
 def signal_handler(signal, frame):
@@ -571,10 +568,9 @@ def set_signal_handler():
 
 def run(addr = 'localhost', port = 8080, idle_timeout = 1200, verbose = False):
     ''' Start cmdctl as one https server. '''
-    if verbose:
-        sys.stdout.write("[b10-cmdctl] starting on %s port:%d\n" %(addr, port))
-    httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler, 
+    httpd = SecureHTTPServer((addr, port), SecureHTTPRequestHandler,
                              CommandControl, idle_timeout, verbose)
+
     httpd.serve_forever()
 
 def check_port(option, opt_str, value, parser):
@@ -612,6 +608,8 @@ if __name__ == '__main__':
     (options, args) = parser.parse_args()
     result = 1                  # in case of failure
     try:
+        if options.verbose:
+            logger.set_severity("DEBUG", 99)
         run(options.addr, options.port, options.idle_timeout, options.verbose)
         result = 0
     except isc.cc.SessionError as err:
diff --git a/src/bin/cmdctl/cmdctl_messages.mes b/src/bin/cmdctl/cmdctl_messages.mes
index 55b941f..a3371b9 100644
--- a/src/bin/cmdctl/cmdctl_messages.mes
+++ b/src/bin/cmdctl/cmdctl_messages.mes
@@ -64,12 +64,15 @@ be set up. The specific error is given in the log message. Possible
 causes may be that the ssl request itself was bad, or the local key or
 certificate file could not be read.
 
+% CMDCTL_STARTED cmdctl is listening for connections on %1:%2
+The cmdctl daemon has started and is now listening for connections.
+
 % CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down
 There was a keyboard interrupt signal to stop the cmdctl daemon. The
 daemon will now shut down.
 
 % CMDCTL_UNCAUGHT_EXCEPTION uncaught exception: %1
-The b10-cdmctl daemon encountered an uncaught exception and
+The b10-cmdctl daemon encountered an uncaught exception and
 will now shut down. This is indicative of a programming error and
 should not happen under normal circumstances. The exception message
 is printed.
diff --git a/src/bin/cmdctl/run_b10-cmdctl.sh.in b/src/bin/cmdctl/run_b10-cmdctl.sh.in
index 6a519e1..7e63249 100644
--- a/src/bin/cmdctl/run_b10-cmdctl.sh.in
+++ b/src/bin/cmdctl/run_b10-cmdctl.sh.in
@@ -19,9 +19,17 @@ PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
 export PYTHON_EXEC
 
 CMD_CTRLD_PATH=@abs_top_builddir@/src/bin/cmdctl
-PYTHONPATH=@abs_top_srcdir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
 export PYTHONPATH
 
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+        @ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+        export @ENV_LIBRARY_PATH@
+fi
+
 BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
 export BIND10_MSGQ_SOCKET_FILE
 
diff --git a/src/bin/cmdctl/tests/Makefile.am b/src/bin/cmdctl/tests/Makefile.am
index e4ec9d4..89d89ea 100644
--- a/src/bin/cmdctl/tests/Makefile.am
+++ b/src/bin/cmdctl/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/cmdctl \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SPEC_PATH=$(abs_top_builddir)/src/bin/cmdctl \
 	CMDCTL_SRC_PATH=$(abs_top_srcdir)/src/bin/cmdctl \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
diff --git a/src/bin/dhcp6/Makefile.am b/src/bin/dhcp6/Makefile.am
index 8d341cb..b0f8cd9 100644
--- a/src/bin/dhcp6/Makefile.am
+++ b/src/bin/dhcp6/Makefile.am
@@ -4,9 +4,7 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/bin -I$(top_builddir)/src/bin
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/cc -I$(top_builddir)/src/lib/cc
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/asiolink
-AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += $(BOOST_INCLUDES)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
@@ -19,34 +17,30 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 CLEANFILES = *.gcno *.gcda spec_config.h
 
 man_MANS = b10-dhcp6.8
-EXTRA_DIST = $(man_MANS) dhcp6.spec
+EXTRA_DIST = $(man_MANS) dhcp6.spec interfaces.txt
 
-#if ENABLE_MAN
-#b10-dhcp6.8: b10-dhcp6.xml
-#	xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-dhcp6.xml
-#endif
+if ENABLE_MAN
+
+b10-dhcp6.8: b10-dhcp6.xml
+	xsltproc --novalid --xinclude --nonet -o $@ http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl $(srcdir)/b10-dhcp6.xml
+
+endif
 
 spec_config.h: spec_config.h.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" spec_config.h.pre >$@
 
 BUILT_SOURCES = spec_config.h
 pkglibexec_PROGRAMS = b10-dhcp6
-b10_dhcp6_SOURCES = main.cc
-b10_dhcp6_SOURCES += dhcp6.h
-b10_dhcp6_LDADD =  $(top_builddir)/src/lib/datasrc/libdatasrc.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/config/libcfgclient.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+
+b10_dhcp6_SOURCES = main.cc iface_mgr.cc dhcp6_srv.cc
+b10_dhcp6_SOURCES += iface_mgr.h dhcp6_srv.h
+
+b10_dhcp6_LDADD = $(top_builddir)/src/lib/dhcp/libdhcp.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 b10_dhcp6_LDADD += $(top_builddir)/src/lib/log/liblog.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
-b10_dhcp6_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
-b10_dhcp6_LDADD += $(SQLITE_LIBS)
 
 # TODO: config.h.in is wrong because doesn't honor pkgdatadir
 # and can't use @datadir@ because doesn't expand default ${prefix}
 b10_dhcp6dir = $(pkgdatadir)
-b10_dhcp6_DATA = dhcp6.spec
-
+b10_dhcp6_DATA = dhcp6.spec interfaces.txt
diff --git a/src/bin/dhcp6/b10-dhcp6.8 b/src/bin/dhcp6/b10-dhcp6.8
index 14a5621..1f34a9a 100644
--- a/src/bin/dhcp6/b10-dhcp6.8
+++ b/src/bin/dhcp6/b10-dhcp6.8
@@ -1,13 +1,13 @@
 '\" t
-.\"     Title: b10-dhpc6
+.\"     Title: b10-dhcp6
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: March 8, 2011
+.\"      Date: October 27, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "B10\-DHCP6" "8" "March 8, 2011" "BIND10" "BIND10"
+.TH "B10\-DHCP6" "8" "October 27, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -19,31 +19,32 @@
 .\" * MAIN CONTENT STARTS HERE *
 .\" -----------------------------------------------------------------
 .SH "NAME"
-b10-dhcp6 \- DHCPv6 daemon in BIND10 architecture
+b10-dhcp6 \- DHCPv6 server in BIND 10 architecture
 .SH "SYNOPSIS"
 .HP \w'\fBb10\-dhcp6\fR\ 'u
-\fBb10\-dhcp6\fR [\fB\-u\ \fR\fB\fIusername\fR\fR] [\fB\-v\fR]
+\fBb10\-dhcp6\fR [\fB\-v\fR]
 .SH "DESCRIPTION"
 .PP
 The
 \fBb10\-dhcp6\fR
-daemon will provide DHCPv6 server implementation when it becomes functional.
+daemon will provide the DHCPv6 server implementation when it becomes functional\&.
+.SH "ARGUMENTS"
 .PP
+The arguments are as follows:
+.PP
+\fB\-v\fR
+.RS 4
+Enable verbose mode\&.
+.RE
 .SH "SEE ALSO"
 .PP
 
-\fBb10-cfgmgr\fR(8),
-\fBb10-loadzone\fR(8),
-\fBb10-msgq\fR(8),
-\fBb10-stats\fR(8),
-\fBb10-zonemgr\fR(8),
-\fBbind10\fR(8),
-BIND 10 Guide\&.
+\fBbind10\fR(8)\&.
 .SH "HISTORY"
 .PP
 The
 \fBb10\-dhcp6\fR
-daemon was first coded in June 2011\&.
+daemon was first coded in June 2011 by Tomek Mrugalski\&.
 .SH "COPYRIGHT"
 .br
 Copyright \(co 2011 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/dhcp6/b10-dhcp6.xml b/src/bin/dhcp6/b10-dhcp6.xml
new file mode 100644
index 0000000..53227db
--- /dev/null
+++ b/src/bin/dhcp6/b10-dhcp6.xml
@@ -0,0 +1,98 @@
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+               "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"
+	       [<!ENTITY mdash "—">]>
+<!--
+ - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+-->
+
+<refentry>
+
+  <refentryinfo>
+    <date>October 27, 2011</date>
+  </refentryinfo>
+
+  <refmeta>
+    <refentrytitle>b10-dhcp6</refentrytitle>
+    <manvolnum>8</manvolnum>
+    <refmiscinfo>BIND10</refmiscinfo>
+  </refmeta>
+
+  <refnamediv>
+    <refname>b10-dhcp6</refname>
+    <refpurpose>DHCPv6 server in BIND 10 architecture</refpurpose>
+  </refnamediv>
+
+  <docinfo>
+    <copyright>
+      <year>2011</year>
+      <holder>Internet Systems Consortium, Inc. ("ISC")</holder>
+    </copyright>
+  </docinfo>
+
+  <refsynopsisdiv>
+    <cmdsynopsis>
+      <command>b10-dhcp6</command>
+      <arg><option>-v</option></arg>
+    </cmdsynopsis>
+  </refsynopsisdiv>
+
+  <refsect1>
+    <title>DESCRIPTION</title>
+    <para>
+      The <command>b10-dhcp6</command> daemon will provide the
+       DHCPv6 server implementation when it becomes functional.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
+    <title>ARGUMENTS</title>
+
+    <para>The arguments are as follows:</para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term><option>-v</option></term>
+        <listitem><para>
+          Enable verbose mode.
+<!-- TODO: what does this do? -->
+        </para></listitem>
+      </varlistentry>
+
+    </variablelist>
+  </refsect1>
+
+  <refsect1>
+    <title>SEE ALSO</title>
+    <para>
+      <citerefentry>
+        <refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum>
+      </citerefentry>.
+    </para>
+  </refsect1>
+
+  <refsect1>
+    <title>HISTORY</title>
+    <para>
+      The <command>b10-dhcp6</command> daemon was first coded in
+      June 2011 by Tomek Mrugalski.
+    </para>
+  </refsect1>
+</refentry><!--
+ - Local variables:
+ - mode: sgml
+ - End:
+-->
diff --git a/src/bin/dhcp6/dhcp6.h b/src/bin/dhcp6/dhcp6.h
deleted file mode 100644
index 322b06c..0000000
--- a/src/bin/dhcp6/dhcp6.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* dhcp6.h
-
-   DHCPv6 Protocol structures... */
-
-/*
- * Copyright (c) 2006-2011 by Internet Systems Consortium, Inc. ("ISC")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
- * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
- *   Internet Systems Consortium, Inc.
- *   950 Charter Street
- *   Redwood City, CA 94063
- *   <info at isc.org>
- *   https://www.isc.org/
- */
-
-
-/* DHCPv6 Option codes: */
-
-#define D6O_CLIENTID				1 /* RFC3315 */
-#define D6O_SERVERID				2
-#define D6O_IA_NA				3
-#define D6O_IA_TA				4
-#define D6O_IAADDR				5
-#define D6O_ORO					6
-#define D6O_PREFERENCE				7
-#define D6O_ELAPSED_TIME			8
-#define D6O_RELAY_MSG				9
-/* Option code 10 unassigned. */
-#define D6O_AUTH				11
-#define D6O_UNICAST				12
-#define D6O_STATUS_CODE				13
-#define D6O_RAPID_COMMIT			14
-#define D6O_USER_CLASS				15
-#define D6O_VENDOR_CLASS			16
-#define D6O_VENDOR_OPTS				17
-#define D6O_INTERFACE_ID			18
-#define D6O_RECONF_MSG				19
-#define D6O_RECONF_ACCEPT			20
-#define D6O_SIP_SERVERS_DNS			21 /* RFC3319 */
-#define D6O_SIP_SERVERS_ADDR			22 /* RFC3319 */
-#define D6O_NAME_SERVERS			23 /* RFC3646 */
-#define D6O_DOMAIN_SEARCH			24 /* RFC3646 */
-#define D6O_IA_PD				25 /* RFC3633 */
-#define D6O_IAPREFIX				26 /* RFC3633 */
-#define D6O_NIS_SERVERS				27 /* RFC3898 */
-#define D6O_NISP_SERVERS			28 /* RFC3898 */
-#define D6O_NIS_DOMAIN_NAME			29 /* RFC3898 */
-#define D6O_NISP_DOMAIN_NAME			30 /* RFC3898 */
-#define D6O_SNTP_SERVERS			31 /* RFC4075 */
-#define D6O_INFORMATION_REFRESH_TIME		32 /* RFC4242 */
-#define D6O_BCMCS_SERVER_D			33 /* RFC4280 */
-#define D6O_BCMCS_SERVER_A			34 /* RFC4280 */
-/* 35 is unassigned */
-#define D6O_GEOCONF_CIVIC			36 /* RFC4776 */
-#define D6O_REMOTE_ID				37 /* RFC4649 */
-#define D6O_SUBSCRIBER_ID			38 /* RFC4580 */
-#define D6O_CLIENT_FQDN				39 /* RFC4704 */
-#define D6O_PANA_AGENT				40 /* paa-option */
-#define D6O_NEW_POSIX_TIMEZONE			41 /* RFC4833 */
-#define D6O_NEW_TZDB_TIMEZONE			42 /* RFC4833 */
-#define D6O_ERO					43 /* RFC4994 */
-#define D6O_LQ_QUERY				44 /* RFC5007 */
-#define D6O_CLIENT_DATA				45 /* RFC5007 */
-#define D6O_CLT_TIME				46 /* RFC5007 */
-#define D6O_LQ_RELAY_DATA			47 /* RFC5007 */
-#define D6O_LQ_CLIENT_LINK			48 /* RFC5007 */
-
-/* 
- * Status Codes, from RFC 3315 section 24.4, and RFC 3633, 5007.
- */
-#define STATUS_Success		 0
-#define STATUS_UnspecFail	 1
-#define STATUS_NoAddrsAvail	 2
-#define STATUS_NoBinding	 3
-#define STATUS_NotOnLink	 4 
-#define STATUS_UseMulticast	 5 
-#define STATUS_NoPrefixAvail	 6
-#define STATUS_UnknownQueryType	 7
-#define STATUS_MalformedQuery	 8
-#define STATUS_NotConfigured	 9
-#define STATUS_NotAllowed	10
-
-/* 
- * DHCPv6 message types, defined in section 5.3 of RFC 3315 
- */
-#define DHCPV6_SOLICIT		    1
-#define DHCPV6_ADVERTISE	    2
-#define DHCPV6_REQUEST		    3
-#define DHCPV6_CONFIRM		    4
-#define DHCPV6_RENEW		    5
-#define DHCPV6_REBIND		    6
-#define DHCPV6_REPLY		    7
-#define DHCPV6_RELEASE		    8
-#define DHCPV6_DECLINE		    9
-#define DHCPV6_RECONFIGURE	   10
-#define DHCPV6_INFORMATION_REQUEST 11
-#define DHCPV6_RELAY_FORW	   12
-#define DHCPV6_RELAY_REPL	   13
-#define DHCPV6_LEASEQUERY	   14
-#define DHCPV6_LEASEQUERY_REPLY    15
-
-extern const char *dhcpv6_type_names[];
-extern const int dhcpv6_type_name_max;
-
-/* DUID type definitions (RFC3315 section 9).
- */
-#define DUID_LLT	1
-#define DUID_EN		2
-#define DUID_LL		3
-
-/* Offsets into IA_*'s where Option spaces commence.  */
-#define IA_NA_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
-#define IA_TA_OFFSET  4 /* IAID only, 4 octets */
-#define IA_PD_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
-
-/* Offset into IAADDR's where Option spaces commence. */
-#define IAADDR_OFFSET 24
-
-/* Offset into IAPREFIX's where Option spaces commence. */
-#define IAPREFIX_OFFSET 25
-
-/* Offset into LQ_QUERY's where Option spaces commence. */
-#define LQ_QUERY_OFFSET 17
-
-/* 
- * DHCPv6 well-known multicast addressess, from section 5.1 of RFC 3315 
- */
-#define All_DHCP_Relay_Agents_and_Servers "FF02::1:2"
-#define All_DHCP_Servers "FF05::1:3"
-
-/*
- * DHCPv6 Retransmission Constants (RFC3315 section 5.5, RFC 5007)
- */
-
-#define SOL_MAX_DELAY     1
-#define SOL_TIMEOUT       1
-#define SOL_MAX_RT      120
-#define REQ_TIMEOUT       1
-#define REQ_MAX_RT       30
-#define REQ_MAX_RC       10
-#define CNF_MAX_DELAY     1
-#define CNF_TIMEOUT       1
-#define CNF_MAX_RT        4
-#define CNF_MAX_RD       10
-#define REN_TIMEOUT      10
-#define REN_MAX_RT      600
-#define REB_TIMEOUT      10
-#define REB_MAX_RT      600
-#define INF_MAX_DELAY     1
-#define INF_TIMEOUT       1
-#define INF_MAX_RT      120
-#define REL_TIMEOUT       1
-#define REL_MAX_RC        5
-#define DEC_TIMEOUT       1
-#define DEC_MAX_RC        5
-#define REC_TIMEOUT       2
-#define REC_MAX_RC        8
-#define HOP_COUNT_LIMIT  32
-#define LQ6_TIMEOUT       1
-#define LQ6_MAX_RT       10
-#define LQ6_MAX_RC        5
-
-/* 
- * Normal packet format, defined in section 6 of RFC 3315 
- */
-struct dhcpv6_packet {
-	unsigned char msg_type;
-	unsigned char transaction_id[3];
-	unsigned char options[FLEXIBLE_ARRAY_MEMBER];
-};
-
-/* Offset into DHCPV6 Reply packets where Options spaces commence. */
-#define REPLY_OPTIONS_INDEX 4
-
-/* 
- * Relay packet format, defined in section 7 of RFC 3315 
- */
-struct dhcpv6_relay_packet {
-	unsigned char msg_type;
-	unsigned char hop_count;
-	unsigned char link_address[16];
-	unsigned char peer_address[16];
-	unsigned char options[FLEXIBLE_ARRAY_MEMBER];
-};
-
-/* Leasequery query-types (RFC 5007) */
-
-#define LQ6QT_BY_ADDRESS	1
-#define LQ6QT_BY_CLIENTID	2
-
-/*
- * DUID time starts 2000-01-01.
- * This constant is the number of seconds since 1970-01-01,
- * when the Unix epoch began.
- */
-#define DUID_TIME_EPOCH 946684800
-
-/* Information-Request Time option (RFC 4242) */
-
-#define IRT_DEFAULT	86400
-#define IRT_MINIMUM	600
-
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
new file mode 100644
index 0000000..ba5afec
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -0,0 +1,231 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+#include "dhcp6/dhcp6_srv.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/option6_iaaddr.h"
+#include "asiolink/io_address.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+Dhcpv6Srv::Dhcpv6Srv() {
+    cout << "Initialization" << endl;
+
+    // first call to instance() will create IfaceMgr (it's a singleton)
+    // it may throw something if things go wrong
+    IfaceMgr::instance();
+
+    /// @todo: instantiate LeaseMgr here once it is implemented.
+
+    setServerID();
+
+    shutdown = false;
+}
+
+Dhcpv6Srv::~Dhcpv6Srv() {
+    cout << "DHCPv6 Srv shutdown." << endl;
+}
+
+bool
+Dhcpv6Srv::run() {
+    while (!shutdown) {
+        boost::shared_ptr<Pkt6> query; // client's message
+        boost::shared_ptr<Pkt6> rsp;   // server's response
+
+        query = IfaceMgr::instance().receive();
+
+        if (query) {
+            if (!query->unpack()) {
+                cout << "Failed to parse incoming packet" << endl;
+                continue;
+            }
+            switch (query->getType()) {
+            case DHCPV6_SOLICIT:
+                rsp = processSolicit(query);
+                break;
+            case DHCPV6_REQUEST:
+                rsp = processRequest(query);
+                break;
+            case DHCPV6_RENEW:
+                rsp = processRenew(query);
+                break;
+            case DHCPV6_REBIND:
+                rsp = processRebind(query);
+                break;
+            case DHCPV6_CONFIRM:
+                rsp = processConfirm(query);
+                break;
+            case DHCPV6_RELEASE:
+                rsp = processRelease(query);
+                break;
+            case DHCPV6_DECLINE:
+                rsp = processDecline(query);
+                break;
+            case DHCPV6_INFORMATION_REQUEST:
+                rsp = processInfRequest(query);
+                break;
+            default:
+                cout << "Unknown pkt type received:"
+                     << query->getType() << endl;
+            }
+
+            cout << "Received " << query->data_len_ << " bytes packet type="
+                 << query->getType() << endl;
+            cout << query->toText();
+            if (rsp) {
+                rsp->remote_addr_ = query->remote_addr_;
+                rsp->local_addr_ = query->local_addr_;
+                rsp->remote_port_ = DHCP6_CLIENT_PORT;
+                rsp->local_port_ = DHCP6_SERVER_PORT;
+                rsp->ifindex_ = query->ifindex_;
+                rsp->iface_ = query->iface_;
+                cout << "Replying with:" << rsp->getType() << endl;
+                cout << rsp->toText();
+                cout << "----" << endl;
+                if (rsp->pack()) {
+                    cout << "#### pack successful." << endl;
+                }
+                IfaceMgr::instance().send(rsp);
+            }
+        }
+
+        // TODO add support for config session (see src/bin/auth/main.cc)
+        //      so this daemon can be controlled from bob
+    }
+
+    return (true);
+}
+
+void
+Dhcpv6Srv::setServerID() {
+    /// TODO implement this for real once interface detection is done.
+    /// Use hardcoded server-id for now
+
+    boost::shared_array<uint8_t> srvid(new uint8_t[14]);
+    srvid[0] = 0;
+    srvid[1] = 1; // DUID type 1 = DUID-LLT (see section 9.2 of RFC3315)
+    srvid[2] = 0;
+    srvid[3] = 6; // HW type = ethernet (I think. I'm typing this from my head
+                  // in hotel, without Internet connection)
+    for (int i=4; i<14; i++) {
+        srvid[i]=i-4;
+    }
+    serverid_ = boost::shared_ptr<Option>(new Option(Option::V6,
+                                                     D6O_SERVERID,
+                                                     srvid,
+                                                     0, 14));
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processSolicit(boost::shared_ptr<Pkt6> solicit) {
+
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_ADVERTISE,
+                                           solicit->getTransid(),
+                                           Pkt6::UDP));
+
+    /// TODO Rewrite this once LeaseManager is implemented.
+
+    // answer client's IA (this is mostly a dummy,
+    // so let's answer only first IA and hope there is only one)
+    boost::shared_ptr<Option> ia_opt = solicit->getOption(D6O_IA_NA);
+    if (ia_opt) {
+        // found IA
+        Option* tmp = ia_opt.get();
+        Option6IA* ia_req = dynamic_cast<Option6IA*>(tmp);
+        if (ia_req) {
+            boost::shared_ptr<Option6IA>
+                ia_rsp(new Option6IA(D6O_IA_NA, ia_req->getIAID()));
+            ia_rsp->setT1(1500);
+            ia_rsp->setT2(2600);
+            boost::shared_ptr<Option6IAAddr>
+                addr(new Option6IAAddr(D6O_IAADDR,
+                                       IOAddress("2001:db8:1234:5678::abcd"),
+                                       5000, 7000));
+            ia_rsp->addOption(addr);
+            reply->addOption(ia_rsp);
+        }
+    }
+
+    // add client-id
+    boost::shared_ptr<Option> clientid = solicit->getOption(D6O_CLIENTID);
+    if (clientid) {
+        reply->addOption(clientid);
+    }
+
+    // add server-id
+    reply->addOption(getServerID());
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRequest(boost::shared_ptr<Pkt6> request) {
+    /// TODO: Implement processRequest() for real
+    boost::shared_ptr<Pkt6> reply = processSolicit(request);
+    reply->setType(DHCPV6_REPLY);
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRenew(boost::shared_ptr<Pkt6> renew) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           renew->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRebind(boost::shared_ptr<Pkt6> rebind) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           rebind->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processConfirm(boost::shared_ptr<Pkt6> confirm) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           confirm->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processRelease(boost::shared_ptr<Pkt6> release) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           release->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processDecline(boost::shared_ptr<Pkt6> decline) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           decline->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
+
+boost::shared_ptr<Pkt6>
+Dhcpv6Srv::processInfRequest(boost::shared_ptr<Pkt6> infRequest) {
+    boost::shared_ptr<Pkt6> reply(new Pkt6(DHCPV6_REPLY,
+                                           infRequest->getTransid(),
+                                           Pkt6::UDP));
+    return reply;
+}
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
new file mode 100644
index 0000000..4daef3a
--- /dev/null
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -0,0 +1,156 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCPV6_SRV_H
+#define DHCPV6_SRV_H
+
+#include <boost/shared_ptr.hpp>
+#include <boost/noncopyable.hpp>
+#include "dhcp/pkt6.h"
+#include "dhcp/option.h"
+#include <iostream>
+
+namespace isc {
+
+namespace dhcp {
+/// @brief DHCPv6 server service.
+///
+/// This singleton class represents DHCPv6 server. It contains all
+/// top-level methods and routines necessary for server operation.
+/// In particular, it instantiates IfaceMgr, loads or generates DUID
+/// that is going to be used as server-identifier, receives incoming
+/// packets, processes them, manages leases assignment and generates
+/// appropriate responses.
+class Dhcpv6Srv : public boost::noncopyable {
+
+public:
+    /// @brief Default constructor.
+    ///
+    /// Instantiates necessary services, required to run DHCPv6 server.
+    /// In particular, creates IfaceMgr that will be responsible for
+    /// network interaction. Will instantiate lease manager, and load
+    /// old or create new DUID.
+    Dhcpv6Srv();
+
+    /// @brief Destructor. Used during DHCPv6 service shutdown.
+    ~Dhcpv6Srv();
+
+    /// @brief Returns server-identifier option
+    ///
+    /// @return server-id option
+    boost::shared_ptr<isc::dhcp::Option>
+    getServerID() { return serverid_; }
+
+    /// @brief Main server processing loop.
+    ///
+    /// Main server processing loop. Receives incoming packets, verifies
+    /// their correctness, generates appropriate answer (if needed) and
+    /// transmits responses.
+    ///
+    /// @return true, if being shut down gracefully, fail if experienced
+    ///         critical error.
+    bool run();
+
+protected:
+    /// @brief Processes incoming SOLICIT and returns response.
+    ///
+    /// Processes received SOLICIT message and verifies that its sender
+    /// should be served. In particular IA, TA and PD options are populated
+    /// with to-be-assigned addresses, temporary addresses and delegated
+    /// prefixes, respectively. In the usual 4 message exchange, server is
+    /// expected to respond with ADVERTISE message. However, if client
+    /// requests rapid-commit and server supports it, REPLY will be sent
+    /// instead of ADVERTISE and requested leases will be assigned
+    /// immediately.
+    ///
+    /// @param solicit SOLICIT message received from client
+    ///
+    /// @return ADVERTISE, REPLY message or NULL
+    boost::shared_ptr<Pkt6>
+    processSolicit(boost::shared_ptr<Pkt6> solicit);
+
+    /// @brief Processes incoming REQUEST and returns REPLY response.
+    ///
+    /// Processes incoming REQUEST message and verifies that its sender
+    /// should be served. In particular IA, TA and PD options are populated
+    /// with assigned addresses, temporary addresses and delegated
+    /// prefixes, respectively. Uses LeaseMgr to allocate or update existing
+    /// leases.
+    ///
+    /// @param request a message received from client
+    ///
+    /// @return REPLY message or NULL
+    boost::shared_ptr<Pkt6>
+    processRequest(boost::shared_ptr<Pkt6> request);
+
+    /// @brief Stub function that will handle incoming RENEW messages.
+    ///
+    /// @param renew message received from client
+    boost::shared_ptr<Pkt6>
+    processRenew(boost::shared_ptr<Pkt6> renew);
+
+    /// @brief Stub function that will handle incoming REBIND messages.
+    ///
+    /// @param rebind message received from client
+    boost::shared_ptr<Pkt6>
+    processRebind(boost::shared_ptr<Pkt6> rebind);
+
+    /// @brief Stub function that will handle incoming CONFIRM messages.
+    ///
+    /// @param confirm message received from client
+    boost::shared_ptr<Pkt6>
+    processConfirm(boost::shared_ptr<Pkt6> confirm);
+
+    /// @brief Stub function that will handle incoming RELEASE messages.
+    ///
+    /// @param release message received from client
+    boost::shared_ptr<Pkt6>
+    processRelease(boost::shared_ptr<Pkt6> release);
+
+    /// @brief Stub function that will handle incoming DECLINE messages.
+    ///
+    /// @param decline message received from client
+    boost::shared_ptr<Pkt6>
+    processDecline(boost::shared_ptr<Pkt6> decline);
+
+    /// @brief Stub function that will handle incoming INF-REQUEST messages.
+    ///
+    /// @param infRequest message received from client
+    boost::shared_ptr<Pkt6>
+    processInfRequest(boost::shared_ptr<Pkt6> infRequest);
+
+    /// @brief Sets server-identifier.
+    ///
+    /// This method attempts to set server-identifier DUID. It loads it
+    /// from a file. If file load fails, it generates new DUID using
+    /// interface link-layer addresses (EUI-64) + timestamp (DUID type
+    /// duid-llt, see RFC3315, section 9.2). If there are no suitable
+    /// interfaces present, an exception is thrown.
+    ///
+    /// @throws isc::Unexpected Failed to read DUID file and no suitable
+    ///         interfaces for new DUID generation are detected.
+    void setServerID();
+
+    /// server DUID (to be sent in server-identifier option)
+    boost::shared_ptr<isc::dhcp::Option> serverid_;
+
+    /// indicates if shutdown is in progress. Setting it to true will
+    /// initiate server shutdown procedure.
+    volatile bool shutdown;
+};
+
+}; // namespace isc::dhcp
+}; // namespace isc
+
+#endif // DHCP6_SRV_H
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
new file mode 100644
index 0000000..a96db07
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -0,0 +1,542 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sstream>
+#include <fstream>
+#include <string.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp6/iface_mgr.h"
+#include "exceptions/exceptions.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace isc {
+
+/// IfaceMgr is a singleton implementation
+IfaceMgr* IfaceMgr::instance_ = 0;
+
+void
+IfaceMgr::instanceCreate() {
+    if (instance_) {
+        // no need to do anything. Instance is already created.
+        // Who called it again anyway? Uh oh. Had to be us, as
+        // this is private method.
+        return;
+    }
+    instance_ = new IfaceMgr();
+}
+
+IfaceMgr&
+IfaceMgr::instance() {
+    if (instance_ == 0) {
+        instanceCreate();
+    }
+    return (*instance_);
+}
+
+IfaceMgr::Iface::Iface(const std::string& name, int ifindex)
+    :name_(name), ifindex_(ifindex), mac_len_(0) {
+
+    memset(mac_, 0, sizeof(mac_));
+}
+
+std::string
+IfaceMgr::Iface::getFullName() const {
+    ostringstream tmp;
+    tmp << name_ << "/" << ifindex_;
+    return (tmp.str());
+}
+
+std::string
+IfaceMgr::Iface::getPlainMac() const {
+    ostringstream tmp;
+    tmp.fill('0');
+    tmp << hex;
+    for (int i = 0; i < mac_len_; i++) {
+        tmp.width(2);
+        tmp << static_cast<int>(mac_[i]); // cast needed: uint8_t streams as a raw char, not hex
+        if (i < mac_len_-1) {
+            tmp << ":";
+        }
+    }
+    return (tmp.str());
+}
+
+IfaceMgr::IfaceMgr()
+    :control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
+     control_buf_(new char[control_buf_len_])
+{
+
+    cout << "IfaceMgr initialization." << endl;
+
+    try {
+        // required for sending/receiving packets
+        // let's keep it in front, just in case someone
+        // wants to send anything during initialization
+
+        // control_buf_ = boost::scoped_array<char>();
+
+        detectIfaces();
+
+        if (!openSockets()) {
+            isc_throw(Unexpected, "Failed to open/bind sockets.");
+        }
+    } catch (const std::exception& ex) {
+        cout << "IfaceMgr creation failed:" << ex.what() << endl;
+
+        // TODO Uncomment this (or call LOG_FATAL) once
+        // interface detection is implemented. Otherwise
+        // it is not possible to run tests in a portable
+        // way (see detectIfaces() method).
+        // throw ex;
+    }
+}
+
+IfaceMgr::~IfaceMgr() {
+    // control_buf_ is deleted automatically (scoped_ptr)
+    control_buf_len_ = 0;
+}
+
+void
+IfaceMgr::detectIfaces() {
+    string ifaceName, linkLocal;
+
+    // TODO do the actual detection. Currently interface detection is faked
+    //      by reading a text file.
+
+    cout << "Interface detection is not implemented yet. "
+         << "Reading interfaces.txt file instead." << endl;
+    cout << "Please use format: interface-name link-local-address" << endl;
+
+    try {
+        ifstream interfaces("interfaces.txt");
+
+        if (!interfaces.good()) {
+            cout << "Failed to read interfaces.txt file." << endl;
+            isc_throw(Unexpected, "Failed to read interfaces.txt");
+        }
+        interfaces >> ifaceName;
+        interfaces >> linkLocal;
+
+        cout << "Detected interface " << ifaceName << "/" << linkLocal << endl;
+
+        Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
+        IOAddress addr(linkLocal);
+        iface.addrs_.push_back(addr);
+        ifaces_.push_back(iface);
+        interfaces.close();
+    } catch (const std::exception& ex) {
+        // TODO: deallocate whatever memory we used
+        // not that important, since this function is going to be
+        // thrown away as soon as we get proper interface detection
+        // implemented
+
+        // TODO Do LOG_FATAL here
+        std::cerr << "Interface detection failed." << std::endl;
+        throw; // rethrow as-is; 'throw ex' would copy and slice the exception
+    }
+}
+
+bool
+IfaceMgr::openSockets() {
+    int sock;
+
+    for (IfaceLst::iterator iface=ifaces_.begin();
+         iface!=ifaces_.end();
+         ++iface) {
+
+        for (Addr6Lst::iterator addr=iface->addrs_.begin();
+             addr!=iface->addrs_.end();
+             ++addr) {
+
+            sock = openSocket(iface->name_, *addr,
+                              DHCP6_SERVER_PORT);
+            if (sock<0) {
+                cout << "Failed to open unicast socket." << endl;
+                return (false);
+            }
+            sendsock_ = sock;
+
+            sock = openSocket(iface->name_,
+                              IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+                              DHCP6_SERVER_PORT);
+            if (sock<0) {
+                cout << "Failed to open multicast socket." << endl;
+                close(sendsock_);
+                return (false);
+            }
+            recvsock_ = sock;
+        }
+    }
+
+    return (true);
+}
+
+void
+IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
+    for (IfaceLst::const_iterator iface=ifaces_.begin();
+         iface!=ifaces_.end();
+         ++iface) {
+        out << "Detected interface " << iface->getFullName() << endl;
+        out << "  " << iface->addrs_.size() << " addr(s):" << endl;
+        for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
+             addr != iface->addrs_.end();
+             ++addr) {
+            out << "  " << addr->toText() << endl;
+        }
+        out << "  mac: " << iface->getPlainMac() << endl;
+    }
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(int ifindex) {
+    for (IfaceLst::iterator iface=ifaces_.begin();
+         iface!=ifaces_.end();
+         ++iface) {
+        if (iface->ifindex_ == ifindex)
+            return (&(*iface));
+    }
+
+    return (NULL); // not found
+}
+
+IfaceMgr::Iface*
+IfaceMgr::getIface(const std::string& ifname) {
+    for (IfaceLst::iterator iface=ifaces_.begin();
+         iface!=ifaces_.end();
+         ++iface) {
+        if (iface->name_ == ifname)
+            return (&(*iface));
+    }
+
+    return (NULL); // not found
+}
+
+int
+IfaceMgr::openSocket(const std::string& ifname,
+                     const IOAddress& addr,
+                     int port) {
+    struct sockaddr_in6 addr6;
+
+    cout << "Creating socket on " << ifname << "/" << addr.toText()
+         << "/port=" << port << endl;
+
+    memset(&addr6, 0, sizeof(addr6));
+    addr6.sin6_family = AF_INET6;
+    addr6.sin6_port = htons(port);
+    addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+
+    memcpy(&addr6.sin6_addr,
+           addr.getAddress().to_v6().to_bytes().data(),
+           sizeof(addr6.sin6_addr));
+#ifdef HAVE_SA_LEN
+    addr6.sin6_len = sizeof(addr6); // addr6 is a struct, not a pointer ('->' would not compile)
+#endif
+
+    // TODO: use sockcreator once it becomes available
+
+    // make a socket
+    int sock = socket(AF_INET6, SOCK_DGRAM, 0);
+    if (sock < 0) {
+        cout << "Failed to create UDP6 socket." << endl;
+        return (-1);
+    }
+
+    /* Set the REUSEADDR option so that we don't fail to start if
+       we're being restarted. */
+    int flag = 1;
+    if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
+                   (char *)&flag, sizeof(flag)) < 0) {
+        cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
+        close(sock);
+        return (-1);
+    }
+
+    if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
+        cout << "Failed to bind socket " << sock << " to " << addr.toText()
+             << "/port=" << port << endl;
+        close(sock);
+        return (-1);
+    }
+#ifdef IPV6_RECVPKTINFO
+    /* RFC3542 - a new way */
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
+                   &flag, sizeof(flag)) != 0) {
+        cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
+        close(sock);
+        return (-1);
+    }
+#else
+    /* RFC2292 - an old way */
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
+                   &flag, sizeof(flag)) != 0) {
+        cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
+        close(sock);
+        return (-1);
+    }
+#endif
+
+    // multicast stuff
+
+    if (addr.getAddress().to_v6().is_multicast()) {
+        // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
+        // are link and site-scoped, so there is no sense to join those groups
+        // with global addresses.
+
+        if ( !joinMcast( sock, ifname,
+                         string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+            close(sock);
+            return (-1);
+        }
+    }
+
+    cout << "Created socket " << sock << " on " << ifname << "/" <<
+        addr.toText() << "/port=" << port << endl;
+
+    return (sock);
+}
+
+bool
+IfaceMgr::joinMcast(int sock, const std::string& ifname,
+const std::string & mcast) {
+
+    struct ipv6_mreq mreq;
+
+    if (inet_pton(AF_INET6, mcast.c_str(),
+                  &mreq.ipv6mr_multiaddr) <= 0) {
+        cout << "Failed to convert " << ifname
+             << " to IPv6 multicast address." << endl;
+        return (false);
+    }
+
+    mreq.ipv6mr_interface = if_nametoindex(ifname.c_str());
+    if (setsockopt(sock, IPPROTO_IPV6, IPV6_JOIN_GROUP,
+                   &mreq, sizeof(mreq)) < 0) {
+        cout << "Failed to join " << mcast << " multicast group." << endl;
+        return (false);
+    }
+
+    cout << "Joined multicast " << mcast << " group." << endl;
+
+    return (true);
+}
+
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
+    struct msghdr m;
+    struct iovec v;
+    int result;
+    struct in6_pktinfo *pktinfo;
+    struct cmsghdr *cmsg;
+    memset(&control_buf_[0], 0, control_buf_len_);
+
+    /*
+     * Initialize our message header structure.
+     */
+    memset(&m, 0, sizeof(m));
+
+    /*
+     * Set the target address we're sending to.
+     */
+    sockaddr_in6 to;
+    memset(&to, 0, sizeof(to));
+    to.sin6_family = AF_INET6;
+    to.sin6_port = htons(pkt->remote_port_);
+    memcpy(&to.sin6_addr,
+           pkt->remote_addr_.getAddress().to_v6().to_bytes().data(),
+           16);
+    to.sin6_scope_id = pkt->ifindex_;
+
+    m.msg_name = &to;
+    m.msg_namelen = sizeof(to);
+
+    /*
+     * Set the data buffer we're sending. (Using this wacky
+     * "scatter-gather" stuff... we only have a single chunk
+     * of data to send, so we declare a single vector entry.)
+     */
+    v.iov_base = (char *) &pkt->data_[0];
+    v.iov_len = pkt->data_len_;
+    m.msg_iov = &v;
+    m.msg_iovlen = 1;
+
+    /*
+     * Setting the interface is a bit more involved.
+     *
+     * We have to create a "control message", and set that to
+     * define the IPv6 packet information. We could set the
+     * source address if we wanted, but we can safely let the
+     * kernel decide what that should be.
+     */
+    m.msg_control = &control_buf_[0];
+    m.msg_controllen = control_buf_len_;
+    cmsg = CMSG_FIRSTHDR(&m);
+    cmsg->cmsg_level = IPPROTO_IPV6;
+    cmsg->cmsg_type = IPV6_PKTINFO;
+    cmsg->cmsg_len = CMSG_LEN(sizeof(*pktinfo));
+    pktinfo = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+    memset(pktinfo, 0, sizeof(*pktinfo));
+    pktinfo->ipi6_ifindex = pkt->ifindex_;
+    m.msg_controllen = cmsg->cmsg_len;
+
+    result = sendmsg(sendsock_, &m, 0);
+    if (result < 0) {
+        cout << "Send packet failed." << endl;
+    }
+    cout << "Sent " << result << " bytes." << endl;
+
+    cout << "Sent " << pkt->data_len_ << " bytes over "
+         << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+         << " dst=" << pkt->remote_addr_.toText()
+         << ", src=" << pkt->local_addr_.toText()
+         << endl;
+
+    return (result >= 0); // sendmsg() returns -1 on error; raw result would make -1 'true'
+}
+
+boost::shared_ptr<Pkt6>
+IfaceMgr::receive() {
+    struct msghdr m;
+    struct iovec v;
+    int result;
+    struct cmsghdr* cmsg;
+    struct in6_pktinfo* pktinfo;
+    struct sockaddr_in6 from;
+    struct in6_addr to_addr;
+    boost::shared_ptr<Pkt6> pkt;
+    char addr_str[INET6_ADDRSTRLEN];
+
+    try {
+        // RFC3315 states that server responses may be
+        // fragmented if they are over MTU. There is no
+        // text whether client's packets may be larger
+        // than 1500. Nevertheless to be on the safe side
+        // we use larger buffer. This buffer limit is checked
+        // during reception (see iov_len below), so we are
+        // safe
+        pkt = boost::shared_ptr<Pkt6>(new Pkt6(65536));
+    } catch (const std::exception& ex) {
+        cout << "Failed to create new packet." << endl;
+        return (boost::shared_ptr<Pkt6>()); // NULL
+    }
+
+    memset(&control_buf_[0], 0, control_buf_len_);
+
+    memset(&from, 0, sizeof(from));
+    memset(&to_addr, 0, sizeof(to_addr));
+
+    /*
+     * Initialize our message header structure.
+     */
+    memset(&m, 0, sizeof(m));
+
+    /*
+     * Point so we can get the from address.
+     */
+    m.msg_name = &from;
+    m.msg_namelen = sizeof(from);
+
+    /*
+     * Set the data buffer we're receiving. (Using this wacky
+     * "scatter-gather" stuff... but we that doesn't really make
+     * sense for us, so we use a single vector entry.)
+     */
+    v.iov_base = (void*)&pkt->data_[0];
+    v.iov_len = pkt->data_len_;
+    m.msg_iov = &v;
+    m.msg_iovlen = 1;
+
+    /*
+     * Getting the interface is a bit more involved.
+     *
+     * We set up some space for a "control message". We have
+     * previously asked the kernel to give us packet
+     * information (when we initialized the interface), so we
+     * should get the destination address from that.
+     */
+    m.msg_control = &control_buf_[0];
+    m.msg_controllen = control_buf_len_;
+
+    result = recvmsg(recvsock_, &m, 0);
+
+    if (result >= 0) {
+        /*
+         * If we did read successfully, then we need to loop
+         * through the control messages we received and
+         * find the one with our destination address.
+         *
+         * We also keep a flag to see if we found it. If we
+         * didn't, then we consider this to be an error.
+         */
+        int found_pktinfo = 0;
+        cmsg = CMSG_FIRSTHDR(&m);
+        while (cmsg != NULL) {
+            if ((cmsg->cmsg_level == IPPROTO_IPV6) &&
+                (cmsg->cmsg_type == IPV6_PKTINFO)) {
+                pktinfo = (struct in6_pktinfo*)CMSG_DATA(cmsg);
+                to_addr = pktinfo->ipi6_addr;
+                pkt->ifindex_ = pktinfo->ipi6_ifindex;
+                found_pktinfo = 1;
+            }
+            cmsg = CMSG_NXTHDR(&m, cmsg);
+        }
+        if (!found_pktinfo) {
+            cout << "Unable to find pktinfo" << endl;
+            return (boost::shared_ptr<Pkt6>()); // NULL
+        }
+    } else {
+        cout << "Failed to receive data." << endl;
+        return (boost::shared_ptr<Pkt6>()); // NULL
+    }
+
+    // That's ugly.
+    // TODO add IOAddress constructor that will take struct in6_addr*
+    // TODO: there's from_bytes() method added in IOAddress. Use it!
+    inet_ntop(AF_INET6, &to_addr, addr_str,INET6_ADDRSTRLEN);
+    pkt->local_addr_ = IOAddress(string(addr_str));
+
+    // TODO: there's from_bytes() method added in IOAddress. Use it!
+    inet_ntop(AF_INET6, &from.sin6_addr, addr_str, INET6_ADDRSTRLEN);
+    pkt->remote_addr_ = IOAddress(string(addr_str));
+
+    pkt->remote_port_ = ntohs(from.sin6_port);
+
+    Iface* received = getIface(pkt->ifindex_);
+    if (received) {
+        pkt->iface_ = received->name_;
+    } else {
+        cout << "Received packet over unknown interface (ifindex="
+             << pkt->ifindex_ << ")." << endl;
+        return (boost::shared_ptr<Pkt6>()); // NULL
+    }
+
+    pkt->data_len_ = result;
+
+    // TODO Move this to LOG_DEBUG
+    cout << "Received " << pkt->data_len_ << " bytes over "
+         << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+         << " src=" << pkt->remote_addr_.toText()
+         << ", dst=" << pkt->local_addr_.toText()
+         << endl;
+
+    return (pkt);
+}
+
+}
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
new file mode 100644
index 0000000..249c7ef
--- /dev/null
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -0,0 +1,229 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef IFACE_MGR_H
+#define IFACE_MGR_H
+
+#include <list>
+#include <boost/shared_ptr.hpp>
+#include <boost/scoped_array.hpp>
+#include <boost/noncopyable.hpp>
+#include "asiolink/io_address.h"
+#include "dhcp/pkt6.h"
+
+namespace isc {
+
+namespace dhcp {
+/// @brief handles network interfaces, transmission and reception
+///
+/// IfaceMgr is an interface manager class that detects available network
+/// interfaces, configured addresses, link-local addresses, and provides
+/// API for using sockets.
+///
+class IfaceMgr : public boost::noncopyable {
+public:
+    /// type that defines list of addresses
+    typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+
+    /// maximum MAC address length (Infiniband uses 20 bytes)
+    static const unsigned int MAX_MAC_LEN = 20;
+
+    /// @brief represents a single network interface
+    ///
+    /// Iface structure represents network interface with all useful
+    /// information, like name, interface index, MAC address and
+    /// list of assigned addresses
+    struct Iface {
+        /// constructor
+        Iface(const std::string& name, int ifindex);
+
+        /// returns full interface name in format ifname/ifindex
+        std::string getFullName() const;
+
+        /// returns link-layer address as plain text
+        std::string getPlainMac() const;
+
+        /// network interface name
+        std::string name_;
+
+        /// interface index (a value that uniquely identifies an interface)
+        int ifindex_;
+
+        /// list of assigned addresses
+        Addr6Lst addrs_;
+
+        /// link-layer address
+        uint8_t mac_[MAX_MAC_LEN];
+
+        /// length of link-layer address (usually 6)
+        int mac_len_;
+
+        /// socket used for sending data
+        int sendsock_;
+
+        /// socket used for receiving data
+        int recvsock_;
+    };
+
+    // TODO performance improvement: we may change this into
+    //      2 maps (ifindex-indexed and name-indexed) and
+    //      also hide it (make it public make tests easier for now)
+
+    /// type that holds a list of interfaces
+    typedef std::list<Iface> IfaceLst;
+
+    /// IfaceMgr is a singleton class. This method returns reference
+    /// to its sole instance.
+    ///
+    /// @return the only existing instance of interface manager
+    static IfaceMgr& instance();
+
+    /// @brief Returns interface with specified interface index
+    ///
+    /// @param ifindex index of searched interface
+    ///
+    /// @return interface with requested index (or NULL if no such
+    ///         interface is present)
+    ///
+    Iface*
+    getIface(int ifindex);
+
+    /// @brief Returns interface with specified interface name
+    ///
+    /// @param ifname name of searched interface
+    ///
+    /// @return interface with requested name (or NULL if no such
+    ///         interface is present)
+    ///
+    Iface*
+    getIface(const std::string& ifname);
+
+    /// debugging method that prints out all available interfaces
+    ///
+    /// @param out specifies stream to print list of interfaces to
+    void
+    printIfaces(std::ostream& out = std::cout);
+
+    /// @brief Sends a packet.
+    ///
+    /// Sends a packet. All parameters for actual transmission are specified in
+    /// Pkt6 structure itself. That includes destination address, src/dst port
+    /// and interface over which data will be sent.
+    ///
+    /// @param pkt packet to be sent
+    ///
+    /// @return true if sending was successful
+    bool
+    send(boost::shared_ptr<Pkt6>& pkt);
+
+    /// @brief Tries to receive packet over open sockets.
+    ///
+    /// Attempts to receive a single packet of any of the open sockets.
+    /// If reception is successful and all information about its sender
+    /// are obtained, Pkt6 object is created and returned.
+    ///
+    /// TODO Start using select() and add timeout to be able
+    /// to not wait infinitely, but rather do something useful
+    /// (e.g. remove expired leases)
+    ///
+    /// @return Pkt6 object representing received packet (or NULL)
+    boost::shared_ptr<Pkt6> receive();
+
+    // don't use private, we need derived classes in tests
+protected:
+
+    /// @brief Protected constructor.
+    ///
+    /// Protected constructor. This is a singleton class. We don't want
+    /// anyone to create instances of IfaceMgr. Use instance() method
+    IfaceMgr();
+
+    ~IfaceMgr();
+
+    /// @brief Detects network interfaces.
+    ///
+    /// This method will eventually detect available interfaces. For now
+    /// it offers stub implementation. First interface name and link-local
+    /// IPv6 address is read from intefaces.txt file.
+    void
+    detectIfaces();
+
+    ///
+    /// Opens UDP/IPv6 socket and binds it to address, interface and port.
+    ///
+    /// @param ifname name of the interface
+    /// @param addr address to be bound.
+    /// @param port UDP port.
+    ///
+    /// @return socket descriptor, if socket creation, binding and multicast
+    /// group join were all successful. -1 otherwise.
+    int openSocket(const std::string& ifname,
+                   const isc::asiolink::IOAddress& addr,
+                   int port);
+
+    // TODO: having 2 maps (ifindex->iface and ifname->iface would)
+    //      probably be better for performance reasons
+
+    /// List of available interfaces
+    IfaceLst ifaces_;
+
+    /// a pointer to a sole instance of this class (a singleton)
+    static IfaceMgr * instance_;
+
+    // TODO: Also keep this interface on Iface once interface detection
+    // is implemented. We may need it e.g. to close all sockets on
+    // specific interface
+    int recvsock_; // TODO: should be fd_set eventually, but we have only
+    int sendsock_; // 2 sockets for now. Will do for until next release
+    // we can't use the same socket, as receiving socket
+    // is bound to multicast address. And we all know what happens
+    // to people who try to use multicast as source address.
+
+    /// length of the control_buf_ array
+    int control_buf_len_;
+
+    /// control-buffer, used in transmission and reception
+    boost::scoped_array<char> control_buf_;
+
+private:
+    /// Opens sockets on detected interfaces.
+    bool
+    openSockets();
+
+    /// creates a single instance of this class (a singleton implementation)
+    static void
+    instanceCreate();
+
+    /// @brief Joins IPv6 multicast group on a socket.
+    ///
+    /// Socket must be created and bound to an address. Note that this
+    /// address is different than the multicast address. For example DHCPv6
+    /// server should bind its socket to link-local address (fe80::1234...)
+    /// and later join ff02::1:2 multicast group.
+    ///
+    /// @param sock socket fd (socket must be bound)
+    /// @param ifname interface name (for link-scoped multicast groups)
+    /// @param mcast multicast address to join (e.g. "ff02::1:2")
+    ///
+    /// @return true if multicast join was successful
+    ///
+    bool
+    joinMcast(int sock, const std::string& ifname,
+              const std::string& mcast);
+};
+
+}; // namespace isc::dhcp
+}; // namespace isc
+
+#endif
diff --git a/src/bin/dhcp6/interfaces.txt b/src/bin/dhcp6/interfaces.txt
new file mode 100644
index 0000000..6a64309
--- /dev/null
+++ b/src/bin/dhcp6/interfaces.txt
@@ -0,0 +1,10 @@
+eth0 fe80::21e:8cff:fe9b:7349
+
+#
+# only first line is read.
+# please use following format:
+# interface-name link-local-ipv6-address
+#
+# This file will become obsolete once proper interface detection 
+# is implemented.
+#
diff --git a/src/bin/dhcp6/main.cc b/src/bin/dhcp6/main.cc
index 75af3d9..5323811 100644
--- a/src/bin/dhcp6/main.cc
+++ b/src/bin/dhcp6/main.cc
@@ -26,21 +26,23 @@
 #include <iostream>
 
 #include <exceptions/exceptions.h>
+#if 0
+// TODO cc is not used yet. It should be eventually
 #include <cc/session.h>
 #include <config/ccsession.h>
+#endif
 
 #include <util/buffer.h>
 #include <log/dummylog.h>
 
 #include <dhcp6/spec_config.h>
-
+#include "dhcp6/dhcp6_srv.h"
 
 using namespace std;
 using namespace isc::util;
-using namespace isc::data;
-using namespace isc::cc;
-using namespace isc::config;
-using namespace isc::util;
+
+using namespace isc;
+using namespace isc::dhcp;
 
 namespace {
 
@@ -48,9 +50,8 @@ bool verbose_mode = false;
 
 void
 usage() {
-    cerr << "Usage:  b10-dhcp6 [-u user] [-v]"
+    cerr << "Usage:  b10-dhcp6 [-v]"
          << endl;
-    cerr << "\t-u: change process UID to the specified user" << endl;
     cerr << "\t-v: verbose output" << endl;
     exit(1);
 }
@@ -59,40 +60,32 @@ usage() {
 int
 main(int argc, char* argv[]) {
     int ch;
-    const char* uid = NULL;
-    bool cache = true;
 
-    while ((ch = getopt(argc, argv, ":nu:v")) != -1) {
+    while ((ch = getopt(argc, argv, ":v")) != -1) {
         switch (ch) {
-        case 'n':
-            cache = false;
-            break;
-        case 'u':
-            uid = optarg;
-            break;
         case 'v':
             verbose_mode = true;
             isc::log::denabled = true;
             break;
-        case '?':
+        case ':':
         default:
             usage();
         }
     }
 
+    cout << "My pid=" << getpid() << endl;
+
     if (argc - optind > 0) {
         usage();
     }
 
     int ret = 0;
 
-    // XXX: we should eventually pass io_service here.
+    // TODO remainder of auth to dhcp6 code copy. We need to enable this in
+    //      dhcp6 eventually
 #if 0
     Session* cc_session = NULL;
-    Session* xfrin_session = NULL;
     Session* statistics_session = NULL;
-    bool xfrin_session_established = false; // XXX (see Trac #287)
-    bool statistics_session_established = false; // XXX (see Trac #287)
     ModuleCCSession* config_session = NULL;
 #endif
     try {
@@ -104,19 +97,16 @@ main(int argc, char* argv[]) {
             specfile = string(DHCP6_SPECFILE_LOCATION);
         }
 
-        // auth_server = new AuthSrv(cache, xfrout_client);
-        // auth_server->setVerbose(verbose_mode);
         cout << "[b10-dhcp6] Initiating DHCPv6 operation." << endl;
 
+        Dhcpv6Srv* srv = new Dhcpv6Srv();
+
+        srv->run();
+
     } catch (const std::exception& ex) {
         cerr << "[b10-dhcp6] Server failed: " << ex.what() << endl;
         ret = 1;
     }
 
-    while (true) {
-            sleep(10);
-            cout << "[b10-dhcp6] I'm alive." << endl;
-    }
-
     return (ret);
 }
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index a35284f..985368e 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -8,15 +8,57 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/bind10 \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
 		$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
+
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
+AM_CPPFLAGS += -I$(top_srcdir)/src/bin
+AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
+AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
+
+CLEANFILES = $(builddir)/interfaces.txt
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+TESTS =
+if HAVE_GTEST
+
+TESTS += dhcp6_unittests
+
+dhcp6_unittests_SOURCES = ../iface_mgr.h ../iface_mgr.cc
+dhcp6_unittests_SOURCES += ../dhcp6_srv.h ../dhcp6_srv.cc
+dhcp6_unittests_SOURCES += dhcp6_unittests.cc
+dhcp6_unittests_SOURCES += iface_mgr_unittest.cc
+dhcp6_unittests_SOURCES += dhcp6_srv_unittest.cc
+
+dhcp6_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+dhcp6_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+dhcp6_unittests_LDADD = $(GTEST_LDADD)
+dhcp6_unittests_LDADD += $(SQLITE_LIBS)
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
new file mode 100644
index 0000000..72e48e4
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -0,0 +1,148 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp6/dhcp6_srv.h"
+#include "dhcp/option6_ia.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+
+// namespace has to be named, because friends are defined in Dhcpv6Srv class
+// Maybe it should be isc::test?
+namespace test {
+
+class NakedDhcpv6Srv: public Dhcpv6Srv {
+    // "naked" DHCPv6 server: exposes protected methods for testing
+public:
+    NakedDhcpv6Srv() { }
+
+    boost::shared_ptr<Pkt6>
+    processSolicit(boost::shared_ptr<Pkt6>& request) {
+        return Dhcpv6Srv::processSolicit(request);
+    }
+    boost::shared_ptr<Pkt6>
+    processRequest(boost::shared_ptr<Pkt6>& request) {
+        return Dhcpv6Srv::processRequest(request);
+    }
+};
+
+class Dhcpv6SrvTest : public ::testing::Test {
+public:
+    Dhcpv6SrvTest() {
+    }
+};
+
+TEST_F(Dhcpv6SrvTest, basic) {
+    // there's almost no code now. What's there provides echo capability
+    // that is just a proof of concept and will be removed soon
+    // No need to thoroughly test it
+
+    // srv has stubbed interface detection. It will read
+    // interfaces.txt instead. It will pretend to have detected
+    // fe80::1234 link-local address on eth0 interface. Obviously
+    // an attempt to bind this socket will fail.
+    EXPECT_NO_THROW( {
+        Dhcpv6Srv * srv = new Dhcpv6Srv();
+
+        delete srv;
+        });
+
+}
+
+TEST_F(Dhcpv6SrvTest, Solicit_basic) {
+    NakedDhcpv6Srv * srv = 0;
+    EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+
+    // a dummy content for client-id
+    boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
+    for (int i=0; i<32; i++)
+        clntDuid[i] = 100+i;
+
+    boost::shared_ptr<Pkt6> sol =
+        boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
+                                         1234, Pkt6::UDP));
+
+    boost::shared_ptr<Option6IA> ia =
+        boost::shared_ptr<Option6IA>(new Option6IA(D6O_IA_NA, 234));
+    ia->setT1(1501);
+    ia->setT2(2601);
+    sol->addOption(ia);
+
+    // Let's not send address in solicit yet
+    // boost::shared_ptr<Option6IAAddr> addr(new Option6IAAddr(D6O_IAADDR,
+    //    IOAddress("2001:db8:1234:ffff::ffff"), 5001, 7001));
+    // ia->addOption(addr);
+    // sol->addOption(ia);
+
+    // constructed very simple SOLICIT message with:
+    // - client-id option (mandatory)
+    // - IA option (a request for address, without any addresses)
+
+    // expected returned ADVERTISE message:
+    // - copy of client-id
+    // - server-id
+    // - IA that includes IAADDR
+
+    boost::shared_ptr<Option> clientid =
+        boost::shared_ptr<Option>(new Option(Option::V6, D6O_CLIENTID,
+                                             clntDuid, 0, 16));
+    sol->addOption(clientid);
+
+    boost::shared_ptr<Pkt6> reply = srv->processSolicit(sol);
+
+    // check if we get response at all
+    ASSERT_TRUE( reply != boost::shared_ptr<Pkt6>() );
+
+    EXPECT_EQ( DHCPV6_ADVERTISE, reply->getType() );
+    EXPECT_EQ( 1234, reply->getTransid() );
+
+    boost::shared_ptr<Option> tmp = reply->getOption(D6O_IA_NA);
+    ASSERT_TRUE( tmp );
+
+    Option6IA * reply_ia = dynamic_cast<Option6IA*> ( tmp.get() );
+    EXPECT_EQ( 234, reply_ia->getIAID() );
+
+    // check that there's an address included
+    EXPECT_TRUE( reply_ia->getOption(D6O_IAADDR));
+
+    // check that server included our own client-id
+    tmp = reply->getOption(D6O_CLIENTID);
+    ASSERT_TRUE( tmp );
+    EXPECT_EQ(clientid->getType(), tmp->getType() );
+    ASSERT_EQ(clientid->len(), tmp->len() );
+
+    EXPECT_TRUE( clientid->getData() == tmp->getData() );
+
+    // check that server included its server-id
+    tmp = reply->getOption(D6O_SERVERID);
+    EXPECT_EQ(tmp->getType(), srv->getServerID()->getType() );
+    ASSERT_EQ(tmp->len(),  srv->getServerID()->len() );
+
+    EXPECT_TRUE(tmp->getData() == srv->getServerID()->getData());
+
+    // more checks to be implemented
+    delete srv;
+
+}
+
+}
diff --git a/src/bin/dhcp6/tests/dhcp6_test.py b/src/bin/dhcp6/tests/dhcp6_test.py
index 61ec009..5ae1f5e 100644
--- a/src/bin/dhcp6/tests/dhcp6_test.py
+++ b/src/bin/dhcp6/tests/dhcp6_test.py
@@ -13,7 +13,7 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-from bind10 import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
+from bind10_src import ProcessInfo, parse_args, dump_pid, unlink_pid_file, _BASETIME
 
 import unittest
 import sys
diff --git a/src/bin/dhcp6/tests/dhcp6_unittests.cc b/src/bin/dhcp6/tests/dhcp6_unittests.cc
new file mode 100644
index 0000000..360fb71
--- /dev/null
+++ b/src/bin/dhcp6/tests/dhcp6_unittests.cc
@@ -0,0 +1,28 @@
+// Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdio.h>
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[]) {
+
+    ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
+
+    int result = RUN_ALL_TESTS();
+
+    return result;
+}
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
new file mode 100644
index 0000000..f126e6a
--- /dev/null
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -0,0 +1,367 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <fstream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "io_address.h"
+#include "dhcp/pkt6.h"
+#include "dhcp6/iface_mgr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+// name of loopback interface detection
+char LOOPBACK[32] = "lo";
+
+namespace {
+const char* const INTERFACE_FILE = TEST_DATA_BUILDDIR "/interfaces.txt";
+
+class NakedIfaceMgr: public IfaceMgr {
+    // "naked" Interface Manager, exposes internal fields
+public:
+    NakedIfaceMgr() { }
+    IfaceLst & getIfacesLst() { return ifaces_; }
+    void setSendSock(int sock) { sendsock_ = sock; }
+    void setRecvSock(int sock) { recvsock_ = sock; }
+
+    int openSocket(const std::string& ifname,
+                   const isc::asiolink::IOAddress& addr,
+                   int port) {
+        return IfaceMgr::openSocket(ifname, addr, port);
+    }
+
+};
+
+// dummy class for now, but this will be expanded when needed
+class IfaceMgrTest : public ::testing::Test {
+public:
+    IfaceMgrTest() {
+    }
+};
+
+// We need some known interface to work reliably. Loopback interface
+// is named lo on Linux and lo0 on BSD boxes. We need to find out
+// which is available. This is not a real test, but rather a workaround
+// that will go away when interface detection is implemented.
+
+// NOTE: At this stage of development, write access to current directory
+// during running tests is required.
+TEST_F(IfaceMgrTest, loDetect) {
+
+    // poor man's interface detection
+    // it will go away as soon as proper interface detection
+    // is implemented
+    if (if_nametoindex("lo")>0) {
+        cout << "This is Linux, using lo as loopback." << endl;
+        sprintf(LOOPBACK, "lo");
+    } else if (if_nametoindex("lo0")>0) {
+        cout << "This is BSD, using lo0 as loopback." << endl;
+        sprintf(LOOPBACK, "lo0");
+    } else {
+        cout << "Failed to detect loopback interface. Neither "
+             << "lo nor lo0 worked. I give up." << endl;
+        ASSERT_TRUE(false);
+    }
+}
+
+// uncomment this test to create packet writer. It will
+// write incoming DHCPv6 packets as C arrays. That is useful
+// for generating test sequences based on actual traffic
+//
+// TODO: this potentially should be moved to a separate tool
+//
+
+#if 0
+TEST_F(IfaceMgrTest, dhcp6Sniffer) {
+    // testing socket operation in a portable way is tricky
+    // without interface detection implemented
+
+    unlink("interfaces.txt");
+
+    ofstream interfaces("interfaces.txt", ios::ate);
+    interfaces << "eth0 fe80::21e:8cff:fe9b:7349";
+    interfaces.close();
+
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    Pkt6 * pkt = 0;
+    int cnt = 0;
+    cout << "---8X-----------------------------------------" << endl;
+    while (true) {
+        pkt = ifacemgr->receive();
+
+        cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
+        cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
+        cout << "    Pkt6* pkt;" << endl;
+        cout << "    pkt = new Pkt6(" << pkt->data_len_ << ");" << endl;
+        cout << "    pkt->remote_port_ = " << pkt-> remote_port_ << ";" << endl;
+        cout << "    pkt->remote_addr_ = IOAddress(\""
+             << pkt->remote_addr_.toText() << "\");" << endl;
+        cout << "    pkt->local_port_ = " << pkt-> local_port_ << ";" << endl;
+        cout << "    pkt->local_addr_ = IOAddress(\""
+             << pkt->local_addr_.toText() << "\");" << endl;
+        cout << "    pkt->ifindex_ = " << pkt->ifindex_ << ";" << endl;
+        cout << "    pkt->iface_ = \"" << pkt->iface_ << "\";" << endl;
+
+        // TODO it is better to declare an array and then memcpy it to
+        // packet.
+        for (int i=0; i< pkt->data_len_; i++) {
+            cout << "    pkt->data_[" << i << "]="
+                 << (int)(unsigned char)pkt->data_[i] << "; ";
+            if (!(i%4))
+                cout << endl;
+        }
+        cout << endl;
+        cout << "    return (pkt);" << endl;
+        cout << "}" << endl << endl;
+
+        delete pkt;
+    }
+    cout << "---8X-----------------------------------------" << endl;
+
+    // never happens. Infinite loop is infinite
+    delete pkt;
+    delete ifacemgr;
+}
+#endif
+
+TEST_F(IfaceMgrTest, basic) {
+    // checks that IfaceManager can be instantiated
+
+    IfaceMgr & ifacemgr = IfaceMgr::instance();
+    ASSERT_TRUE(&ifacemgr != 0);
+}
+
+TEST_F(IfaceMgrTest, ifaceClass) {
+    // basic tests for Iface inner class
+
+    IfaceMgr::Iface * iface = new IfaceMgr::Iface("eth5", 7);
+
+    EXPECT_STREQ("eth5/7", iface->getFullName().c_str());
+
+    delete iface;
+
+}
+
+// TODO: Implement getPlainMac() test as soon as interface detection
+// is implemented.
+TEST_F(IfaceMgrTest, getIface) {
+
+    cout << "Interface checks. Please ignore socket binding errors." << endl;
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    // interface name, ifindex
+    IfaceMgr::Iface iface1("lo1", 1);
+    IfaceMgr::Iface iface2("eth5", 2);
+    IfaceMgr::Iface iface3("en3", 5);
+    IfaceMgr::Iface iface4("e1000g0", 3);
+
+    // note: real interfaces may be detected as well
+    ifacemgr->getIfacesLst().push_back(iface1);
+    ifacemgr->getIfacesLst().push_back(iface2);
+    ifacemgr->getIfacesLst().push_back(iface3);
+    ifacemgr->getIfacesLst().push_back(iface4);
+
+    cout << "There are " << ifacemgr->getIfacesLst().size()
+         << " interfaces." << endl;
+    for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+         iface != ifacemgr->getIfacesLst().end();
+         ++iface) {
+        cout << "  " << iface->name_ << "/" << iface->ifindex_ << endl;
+    }
+
+
+    // check that interface can be retrieved by ifindex
+    IfaceMgr::Iface * tmp = ifacemgr->getIface(5);
+    // ASSERT_NE(NULL, tmp); is not supported. hmmmm.
+    ASSERT_TRUE( tmp != NULL );
+
+    EXPECT_STREQ( "en3", tmp->name_.c_str() );
+    EXPECT_EQ(5, tmp->ifindex_);
+
+    // check that interface can be retrieved by name
+    tmp = ifacemgr->getIface("lo1");
+    ASSERT_TRUE( tmp != NULL );
+
+    EXPECT_STREQ( "lo1", tmp->name_.c_str() );
+    EXPECT_EQ(1, tmp->ifindex_);
+
+    // check that non-existing interfaces are not returned
+    EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
+
+    delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, detectIfaces) {
+
+    // test detects that interfaces can be detected
+    // there is no code for that now, but interfaces are
+    // read from file
+    fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+    fakeifaces << "eth0 fe80::1234";
+    fakeifaces.close();
+
+    // this is not usable on systems that don't have eth0
+    // interfaces. Nevertheless, this fake interface should
+    // be on list, but if_nametoindex() will fail.
+
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    ASSERT_TRUE( ifacemgr->getIface("eth0") != NULL );
+
+    IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
+
+    // there should be one address
+    EXPECT_EQ(1, eth0->addrs_.size());
+
+    IOAddress * addr = &(*eth0->addrs_.begin());
+    ASSERT_TRUE( addr != NULL );
+
+    EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+
+    delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sockets) {
+    // testing socket operation in a portable way is tricky
+    // without interface detection implemented
+
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    IOAddress loAddr("::1");
+
+    // bind multicast socket to port 10547
+    int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+    EXPECT_GT(socket1, 0); // socket > 0
+
+    // bind unicast socket to port 10548
+    int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
+    EXPECT_GT(socket2, 0);
+
+    // expect success. This address/port is already bound, but
+    // we are using SO_REUSEADDR, so we can bind it twice
+    int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+
+    // rebinding succeeds on Linux, fails on BSD
+    // TODO: add OS-specific defines here (or modify code to
+    // behave the same way on all OSes, but that may not be
+    // possible)
+    // EXPECT_GT(socket3, 0); // socket > 0
+
+    // we now have 3 sockets open at the same time. Looks good.
+
+    close(socket1);
+    close(socket2);
+    close(socket3);
+
+    delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+    // testing socket operation in a portable way is tricky
+    // without interface detection implemented
+
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    IOAddress loAddr("::1");
+    IOAddress mcastAddr("ff02::1:2");
+
+    // bind multicast socket to port 10547
+    int socket1 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+    EXPECT_GT(socket1, 0); // socket > 0
+
+    // expect success. This address/port is already bound, but
+    // we are using SO_REUSEADDR, so we can bind it twice
+    int socket2 = ifacemgr->openSocket(LOOPBACK, mcastAddr, 10547);
+    EXPECT_GT(socket2, 0);
+
+    // there's no good way to test negative case here.
+    // we would need non-multicast interface. We will be able
+    // to iterate through available interfaces and check if there
+    // are interfaces without multicast-capable flag.
+
+    close(socket1);
+    close(socket2);
+
+    delete ifacemgr;
+}
+
+// TODO: disabled due to other naming on various systems
+// (lo in Linux, lo0 in BSD systems)
+// Fix for this is available on 1186 branch, will reenable
+// this test once 1186 is merged
+TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+    // testing socket operation in a portable way is tricky
+    // without interface detection implemented
+
+    fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+    fakeifaces << LOOPBACK << " ::1";
+    fakeifaces.close();
+
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+
+    // let's assume that every supported OS have lo interface
+    IOAddress loAddr("::1");
+    int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+    int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+
+    ifacemgr->setSendSock(socket2);
+    ifacemgr->setRecvSock(socket1);
+
+    boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
+
+    // prepare dummy payload
+    for (int i=0;i<128; i++) {
+        sendPkt->data_[i] = i;
+    }
+
+    sendPkt->remote_port_ = 10547;
+    sendPkt->remote_addr_ = IOAddress("::1");
+    sendPkt->ifindex_ = 1;
+    sendPkt->iface_ = LOOPBACK;
+
+    boost::shared_ptr<Pkt6> rcvPkt;
+
+    EXPECT_EQ(true, ifacemgr->send(sendPkt));
+
+    rcvPkt = ifacemgr->receive();
+
+    ASSERT_TRUE( rcvPkt ); // received our own packet
+
+    // let's check that we received what was sent
+    EXPECT_EQ(sendPkt->data_len_, rcvPkt->data_len_);
+    EXPECT_EQ(0, memcmp(&sendPkt->data_[0], &rcvPkt->data_[0],
+                        rcvPkt->data_len_) );
+
+    EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
+    EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+    delete ifacemgr;
+}
+
+}
diff --git a/src/bin/host/Makefile.am b/src/bin/host/Makefile.am
index ec34ce7..a8f96c2 100644
--- a/src/bin/host/Makefile.am
+++ b/src/bin/host/Makefile.am
@@ -13,6 +13,7 @@ CLEANFILES = *.gcno *.gcda
 bin_PROGRAMS = b10-host
 b10_host_SOURCES = host.cc
 b10_host_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+b10_host_LDADD += $(top_builddir)/src/lib/util/libutil.la
 b10_host_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
 man_MANS = b10-host.1
diff --git a/src/bin/host/b10-host.1 b/src/bin/host/b10-host.1
index ed0068b..050f6a3 100644
--- a/src/bin/host/b10-host.1
+++ b/src/bin/host/b10-host.1
@@ -103,10 +103,6 @@ It doesn\'t use
 at this time\&. The default name server used is 127\&.0\&.0\&.1\&.
 .PP
 
-\fBb10\-host\fR
-does not do reverse lookups by default yet (by detecting if name is a IPv4 or IPv6 address)\&.
-.PP
-
 \fB\-p\fR
 is not a standard feature\&.
 .SH "HISTORY"
diff --git a/src/bin/host/b10-host.xml b/src/bin/host/b10-host.xml
index 7da07dd..a17ef67 100644
--- a/src/bin/host/b10-host.xml
+++ b/src/bin/host/b10-host.xml
@@ -176,11 +176,6 @@
     </para>
 
     <para>
-      <command>b10-host</command> does not do reverse lookups by
-      default yet (by detecting if name is a IPv4 or IPv6 address).
-    </para>
-
-    <para>
       <option>-p</option> is not a standard feature.
     </para>
   </refsect1>
diff --git a/src/bin/loadzone/Makefile.am b/src/bin/loadzone/Makefile.am
index 74d4dd4..a235d68 100644
--- a/src/bin/loadzone/Makefile.am
+++ b/src/bin/loadzone/Makefile.am
@@ -1,5 +1,6 @@
 SUBDIRS = . tests/correct tests/error
 bin_SCRIPTS = b10-loadzone
+noinst_SCRIPTS = run_loadzone.sh
 
 CLEANFILES = b10-loadzone
 
diff --git a/src/bin/loadzone/run_loadzone.sh.in b/src/bin/loadzone/run_loadzone.sh.in
index 95de396..43b7920 100755
--- a/src/bin/loadzone/run_loadzone.sh.in
+++ b/src/bin/loadzone/run_loadzone.sh.in
@@ -18,14 +18,14 @@
 PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
 export PYTHON_EXEC
 
-PYTHONPATH=@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python
 export PYTHONPATH
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
 if test $SET_ENV_LIBRARY_PATH = yes; then
-	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:$@ENV_LIBRARY_PATH@
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
 	export @ENV_LIBRARY_PATH@
 fi
 
diff --git a/src/bin/loadzone/tests/correct/Makefile.am b/src/bin/loadzone/tests/correct/Makefile.am
index 3507bfa..fb882ba 100644
--- a/src/bin/loadzone/tests/correct/Makefile.am
+++ b/src/bin/loadzone/tests/correct/Makefile.am
@@ -13,11 +13,13 @@ EXTRA_DIST += ttl2.db
 EXTRA_DIST += ttlext.db
 EXTRA_DIST += example.db
 
+noinst_SCRIPTS = correct_test.sh
+
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # TODO: maybe use TESTS?
diff --git a/src/bin/loadzone/tests/correct/correct_test.sh.in b/src/bin/loadzone/tests/correct/correct_test.sh.in
old mode 100644
new mode 100755
index 509d8e5..d944451
--- a/src/bin/loadzone/tests/correct/correct_test.sh.in
+++ b/src/bin/loadzone/tests/correct/correct_test.sh.in
@@ -18,7 +18,7 @@
 PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
 export PYTHON_EXEC
 
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
 export PYTHONPATH
 
 LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/loadzone/tests/error/Makefile.am b/src/bin/loadzone/tests/error/Makefile.am
index 87bb1cf..03263b7 100644
--- a/src/bin/loadzone/tests/error/Makefile.am
+++ b/src/bin/loadzone/tests/error/Makefile.am
@@ -12,11 +12,13 @@ EXTRA_DIST += keyerror3.db
 EXTRA_DIST += originerr1.db
 EXTRA_DIST += originerr2.db
 
+noinst_SCRIPTS = error_test.sh
+
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # TODO: use TESTS ?
diff --git a/src/bin/loadzone/tests/error/error_test.sh.in b/src/bin/loadzone/tests/error/error_test.sh.in
old mode 100644
new mode 100755
index d1d6bd1..94c5edb
--- a/src/bin/loadzone/tests/error/error_test.sh.in
+++ b/src/bin/loadzone/tests/error/error_test.sh.in
@@ -18,7 +18,7 @@
 PYTHON_EXEC=${PYTHON_EXEC:- at PYTHON@}
 export PYTHON_EXEC
 
-PYTHONPATH=@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
+PYTHONPATH=@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_srcdir@/src/lib/python:@abs_top_builddir@/src/lib/python
 export PYTHONPATH
 
 LOADZONE_PATH=@abs_top_builddir@/src/bin/loadzone
diff --git a/src/bin/msgq/Makefile.am b/src/bin/msgq/Makefile.am
index 0eebf00..908cab5 100644
--- a/src/bin/msgq/Makefile.am
+++ b/src/bin/msgq/Makefile.am
@@ -1,7 +1,7 @@
 SUBDIRS = . tests
 
 pkglibexecdir = $(libexecdir)/@PACKAGE@
- 
+
 pkglibexec_SCRIPTS = b10-msgq
 
 CLEANFILES = b10-msgq msgq.pyc
diff --git a/src/bin/msgq/msgq.py.in b/src/bin/msgq/msgq.py.in
index 06fe840..333ae89 100755
--- a/src/bin/msgq/msgq.py.in
+++ b/src/bin/msgq/msgq.py.in
@@ -28,7 +28,6 @@ import struct
 import errno
 import time
 import select
-import pprint
 import random
 from optparse import OptionParser, OptionValueError
 import isc.util.process
@@ -96,10 +95,10 @@ class MsgQ:
                                "@PACKAGE_NAME@",
                                "msgq_socket").replace("${prefix}",
                                                       "@prefix@")
-    
+
     def __init__(self, socket_file=None, verbose=False):
         """Initialize the MsgQ master.
-        
+
         The socket_file specifies the path to the UNIX domain socket
         that the msgq process listens on. If it is None, the
         environment variable BIND10_MSGQ_SOCKET_FILE is used. If that
@@ -135,7 +134,7 @@ class MsgQ:
             self.poller = select.poll()
         except AttributeError:
             self.kqueue = select.kqueue()
-    
+
     def add_kqueue_socket(self, socket, write_filter=False):
         """Add a kquque filter for a socket.  By default the read
         filter is used; if write_filter is set to True, the write
@@ -167,7 +166,7 @@ class MsgQ:
                              self.socket_file)
 
         self.listen_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        
+
         if os.path.exists(self.socket_file):
             os.remove(self.socket_file)
         try:
@@ -196,7 +195,7 @@ class MsgQ:
 
         if self.verbose:
             sys.stdout.write("[b10-msgq] Listening\n")
-        
+
         self.runnable = True
 
     def process_accept(self):
@@ -293,9 +292,6 @@ class MsgQ:
             sys.stderr.write("[b10-msgq] Routing decode error: %s\n" % err)
             return
 
-#        sys.stdout.write("\t" + pprint.pformat(routingmsg) + "\n")
-#        sys.stdout.write("\t" + pprint.pformat(data) + "\n")
-
         self.process_command(fd, sock, routingmsg, data)
 
     def process_command(self, fd, sock, routing, data):
@@ -357,7 +353,18 @@ class MsgQ:
         if fileno in self.sendbuffs:
             amount_sent = 0
         else:
-            amount_sent = self.__send_data(sock, msg)
+            try:
+                amount_sent = self.__send_data(sock, msg)
+            except socket.error as sockerr:
+                # in the case the other side seems gone, kill the socket
+                # and drop the send action
+                if sockerr.errno == errno.EPIPE:
+                    print("[b10-msgq] SIGPIPE on send, dropping message " +
+                          "and closing connection")
+                    self.kill_socket(fileno, sock)
+                    return
+                else:
+                    raise
 
         # Still something to send
         if amount_sent < len(msg):
@@ -448,12 +455,12 @@ class MsgQ:
 
     def run(self):
         """Process messages.  Forever.  Mostly."""
-        
+
         if self.poller:
             self.run_poller()
         else:
             self.run_kqueue()
-    
+
     def run_poller(self):
         while True:
             try:
@@ -511,7 +518,7 @@ def signal_handler(signal, frame):
 
 if __name__ == "__main__":
     def check_port(option, opt_str, value, parser):
-        """Function to insure that the port we are passed is actually 
+        """Function to insure that the port we are passed is actually
         a valid port number. Used by OptionParser() on startup."""
         intval = int(value)
         if (intval < 0) or (intval > 65535):
diff --git a/src/bin/msgq/tests/Makefile.am b/src/bin/msgq/tests/Makefile.am
index 50c1e6e..50b218b 100644
--- a/src/bin/msgq/tests/Makefile.am
+++ b/src/bin/msgq/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_builddir)/src/bin/msgq:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/msgq \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_msgq_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/resolver/b10-resolver.8 b/src/bin/resolver/b10-resolver.8
index 849092c..9161ec2 100644
--- a/src/bin/resolver/b10-resolver.8
+++ b/src/bin/resolver/b10-resolver.8
@@ -2,12 +2,12 @@
 .\"     Title: b10-resolver
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: February 17, 2011
+.\"      Date: August 17, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "B10\-RESOLVER" "8" "February 17, 2011" "BIND10" "BIND10"
+.TH "B10\-RESOLVER" "8" "August 17, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -54,7 +54,7 @@ must be either a valid numeric user ID or a valid user name\&. By default the da
 .PP
 \fB\-v\fR
 .RS 4
-Enabled verbose mode\&. This enables diagnostic messages to STDERR\&.
+Enable verbose mode\&. This sets logging to the maximum debugging level\&.
 .RE
 .SH "CONFIGURATION AND COMMANDS"
 .PP
@@ -77,6 +77,25 @@ string and
 number\&. The defaults are address ::1 port 53 and address 127\&.0\&.0\&.1 port 53\&.
 .PP
 
+
+
+
+
+
+\fIquery_acl\fR
+is a list of query access control rules\&. The list items are the
+\fIaction\fR
+string and the
+\fIfrom\fR
+or
+\fIkey\fR
+strings\&. The possible actions are ACCEPT, REJECT and DROP\&. The
+\fIfrom\fR
+is a remote (source) IPv4 or IPv6 address or special keyword\&. The
+\fIkey\fR
+is a TSIG key name\&. The default configuration accepts queries from 127\&.0\&.0\&.1 and ::1\&.
+.PP
+
 \fIretries\fR
 is the number of times to retry (resend query) after a query timeout (\fItimeout_query\fR)\&. The default is 3\&.
 .PP
@@ -88,7 +107,7 @@ to use directly as root servers to start resolving\&. The list items are the
 \fIaddress\fR
 string and
 \fIport\fR
-number\&. If empty, a hardcoded address for F\-root (192\&.5\&.5\&.241) is used\&.
+number\&. By default, a hardcoded address for l\&.root\-servers\&.net (199\&.7\&.83\&.42 or 2001:500:3::42) is used\&.
 .PP
 
 \fItimeout_client\fR
@@ -121,8 +140,7 @@ BIND 10 Guide\&.
 .PP
 The
 \fBb10\-resolver\fR
-daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&.
-
+daemon was first coded in September 2010\&. The initial implementation only provided forwarding\&. Iteration was introduced in January 2011\&. Caching was implemented in February 2011\&. Access control was introduced in June 2011\&.
 .SH "COPYRIGHT"
 .br
 Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/resolver/b10-resolver.xml b/src/bin/resolver/b10-resolver.xml
index bdf4f8a..75cced7 100644
--- a/src/bin/resolver/b10-resolver.xml
+++ b/src/bin/resolver/b10-resolver.xml
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>February 17, 2011</date>
+    <date>August 17, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -99,11 +99,14 @@
         </listitem>
       </varlistentry>
 
+<!-- TODO: this needs to be fixed as -v on command line
+should imply stdout or stderr output also -->
+<!-- TODO: can this -v be overidden by configuration or bindctl? -->
       <varlistentry>
         <term><option>-v</option></term>
         <listitem><para>
-          Enabled verbose mode. This enables diagnostic messages to
-          STDERR.
+          Enable verbose mode.
+          This sets logging to the maximum debugging level.
         </para></listitem>
       </varlistentry>
 
@@ -147,6 +150,22 @@ once that is merged you can for instance do 'config add Resolver/forward_address
     </para>
 
     <para>
+<!-- TODO: need more explanation or point to guide. -->
+<!-- TODO: what about a netmask or cidr? -->
+<!-- TODO: document "key" -->
+<!-- TODO: where are the TSIG keys defined? -->
+<!-- TODO: key and from are mutually exclusive? what if both defined? -->
+      <varname>query_acl</varname> is a list of query access control
+      rules. The list items are the <varname>action</varname> string
+      and the <varname>from</varname> or <varname>key</varname> strings.
+      The possible actions are ACCEPT, REJECT and DROP.
+      The <varname>from</varname> is a remote (source) IPv4 or IPv6
+      address or special keyword.
+      The <varname>key</varname> is a TSIG key name.
+      The default configuration accepts queries from 127.0.0.1 and ::1.
+    </para>
+
+    <para>
       <varname>retries</varname> is the number of times to retry
       (resend query) after a query timeout
       (<varname>timeout_query</varname>).
@@ -159,8 +178,10 @@ once that is merged you can for instance do 'config add Resolver/forward_address
       root servers to start resolving.
       The list items are the <varname>address</varname> string
       and <varname>port</varname> number.
-      If empty, a hardcoded address for F-root (192.5.5.241) is used.
+      By default, a hardcoded address for l.root-servers.net
+      (199.7.83.42 or 2001:500:3::42) is used.
     </para>
+<!-- TODO: this is broken, see ticket #1184 -->
 
     <para>
       <varname>timeout_client</varname> is the number of milliseconds
@@ -234,7 +255,8 @@ once that is merged you can for instance do 'config add Resolver/forward_address
       The <command>b10-resolver</command> daemon was first coded in
       September 2010. The initial implementation only provided
       forwarding. Iteration was introduced in January 2011.
-<!-- TODO: document when caching was added -->
+      Caching was implemented in February 2011.
+      Access control was introduced in June 2011.
 <!-- TODO: document when validation was added -->
     </para>
   </refsect1>
diff --git a/src/bin/resolver/resolver.cc b/src/bin/resolver/resolver.cc
index fb9621b..bb1eb3b 100644
--- a/src/bin/resolver/resolver.cc
+++ b/src/bin/resolver/resolver.cc
@@ -520,7 +520,8 @@ ResolverImpl::processNormalQuery(const IOMessage& io_message,
     const Client client(io_message);
     const BasicAction query_action(
         getQueryACL().execute(acl::dns::RequestContext(
-                                  client.getRequestSourceIPAddress())));
+                                  client.getRequestSourceIPAddress(),
+                                  query_message->getTSIGRecord())));
     if (query_action == isc::acl::REJECT) {
         LOG_INFO(resolver_logger, RESOLVER_QUERY_REJECTED)
             .arg(question->getName()).arg(qtype).arg(qclass).arg(client);
@@ -539,7 +540,7 @@ ResolverImpl::processNormalQuery(const IOMessage& io_message,
     // ACL passed.  Reject inappropriate queries for the resolver.
     if (qtype == RRType::AXFR()) {
         if (io_message.getSocket().getProtocol() == IPPROTO_UDP) {
-            // Can't process AXFR request receoved over UDP
+            // Can't process AXFR request received over UDP
             LOG_DEBUG(resolver_logger, RESOLVER_DBG_PROCESS, RESOLVER_AXFR_UDP);
             makeErrorMessage(query_message, answer_message, buffer,
                              Rcode::FORMERR());
diff --git a/src/bin/resolver/resolver_log.h b/src/bin/resolver/resolver_log.h
index 8378b98..e0e3fda 100644
--- a/src/bin/resolver/resolver_log.h
+++ b/src/bin/resolver/resolver_log.h
@@ -23,20 +23,20 @@
 /// Defines the levels used to output debug messages in the resolver.  Note that
 /// higher numbers equate to more verbose (and detailed) output.
 
-// Initialization
-const int RESOLVER_DBG_INIT = 10;
+// Initialization and shutdown of the resolver.
+const int RESOLVER_DBG_INIT = DBGLVL_START_SHUT;
 
 // Configuration messages
-const int RESOLVER_DBG_CONFIG = 30;
+const int RESOLVER_DBG_CONFIG = DBGLVL_COMMAND;
 
 // Trace sending and receiving of messages
-const int RESOLVER_DBG_IO = 50;
+const int RESOLVER_DBG_IO = DBGLVL_TRACE_BASIC;
 
 // Trace processing of messages
-const int RESOLVER_DBG_PROCESS = 70;
+const int RESOLVER_DBG_PROCESS = DBGLVL_TRACE_DETAIL;
 
 // Detailed message information
-const int RESOLVER_DBG_DETAIL = 90;
+const int RESOLVER_DBG_DETAIL = DBGLVL_TRACE_DETAIL_DATA;
 
 
 /// \brief Resolver Logger
diff --git a/src/bin/resolver/resolver_messages.mes b/src/bin/resolver/resolver_messages.mes
index b44115a..7930c52 100644
--- a/src/bin/resolver/resolver_messages.mes
+++ b/src/bin/resolver/resolver_messages.mes
@@ -78,7 +78,7 @@ specified, it will appear once for each address.
 % RESOLVER_FORWARD_QUERY processing forward query
 This is a debug message indicating that a query received by the resolver
 has passed a set of checks (message is well-formed, it is allowed by the
-ACL, it is a supported opcode etc.) and is being forwarded to upstream
+ACL, it is a supported opcode, etc.) and is being forwarded to upstream
 servers.
 
 % RESOLVER_HEADER_ERROR message received, exception when processing header: %1
@@ -116,7 +116,7 @@ so is returning a REFUSED response to the sender.
 % RESOLVER_NORMAL_QUERY processing normal query
 This is a debug message indicating that the query received by the resolver
 has passed a set of checks (message is well-formed, it is allowed by the
-ACL, it is a supported opcode etc.) and is being processed the resolver.
+ACL, it is a supported opcode, etc.) and is being processed by the resolver.
 
 % RESOLVER_NOTIFY_RECEIVED NOTIFY arrived but server is not authoritative
 The resolver has received a NOTIFY message.  As the server is not
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 97a2ba6..12ddab3 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -60,6 +60,4 @@ run_unittests_CXXFLAGS += -Wno-unused-parameter
 endif
 endif
 
-
-
 noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/resolver/tests/resolver_config_unittest.cc b/src/bin/resolver/tests/resolver_config_unittest.cc
index 698e535..c089041 100644
--- a/src/bin/resolver/tests/resolver_config_unittest.cc
+++ b/src/bin/resolver/tests/resolver_config_unittest.cc
@@ -72,7 +72,8 @@ protected:
                                           IOSocket::getDummyUDPSocket(),
                                           *endpoint));
         client.reset(new Client(*query_message));
-        request.reset(new RequestContext(client->getRequestSourceIPAddress()));
+        request.reset(new RequestContext(client->getRequestSourceIPAddress(),
+                                         NULL));
         return (*request);
     }
     void invalidTest(const string &JSON, const string& name);
diff --git a/src/bin/sockcreator/README b/src/bin/sockcreator/README
index 4dbbee7..e142d19 100644
--- a/src/bin/sockcreator/README
+++ b/src/bin/sockcreator/README
@@ -3,7 +3,7 @@ The socket creator
 
 The only thing we need higher rights than standard user is binding sockets to
 ports lower than 1024. So we will have a separate process that keeps the
-rights, while the rests drop them for security reasons.
+rights, while the rest drops them for security reasons.
 
 This process is the socket creator. Its goal is to be as simple as possible
 and to contain as little code as possible to minimise the amount of code
diff --git a/src/bin/stats/Makefile.am b/src/bin/stats/Makefile.am
index e830f65..63e2a3b 100644
--- a/src/bin/stats/Makefile.am
+++ b/src/bin/stats/Makefile.am
@@ -5,18 +5,23 @@ pkglibexecdir = $(libexecdir)/@PACKAGE@
 pkglibexec_SCRIPTS = b10-stats b10-stats-httpd
 
 b10_statsdir = $(pkgdatadir)
-b10_stats_DATA = stats.spec stats-httpd.spec stats-schema.spec
+b10_stats_DATA = stats.spec stats-httpd.spec
 b10_stats_DATA += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
-pyexec_DATA = stats_messages.py stats_httpd_messages.py
+
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
 
 CLEANFILES = b10-stats stats.pyc
 CLEANFILES += b10-stats-httpd stats_httpd.pyc
-CLEANFILES += stats_messages.py stats_messages.pyc
-CLEANFILES += stats_httpd_messages.py stats_httpd_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.pyc
 
 man_MANS = b10-stats.8 b10-stats-httpd.8
 EXTRA_DIST = $(man_MANS) b10-stats.xml b10-stats-httpd.xml
-EXTRA_DIST += stats.spec stats-httpd.spec stats-schema.spec
+EXTRA_DIST += stats.spec stats-httpd.spec
 EXTRA_DIST += stats-httpd-xml.tpl stats-httpd-xsd.tpl stats-httpd-xsl.tpl
 EXTRA_DIST += stats_messages.mes stats_httpd_messages.mes
 
@@ -30,18 +35,20 @@ b10-stats-httpd.8: b10-stats-httpd.xml
 
 endif
 
-stats_messages.py: stats_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/stats/stats_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py : stats_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_messages.mes
 
-stats_httpd_messages.py: stats_httpd_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/stats/stats_httpd_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py : stats_httpd_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/stats_httpd_messages.mes
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-stats: stats.py
+b10-stats: stats.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|"  stats.py >$@
 	chmod a+x $@
 
-b10-stats-httpd: stats_httpd.py
+b10-stats-httpd: stats_httpd.py $(PYTHON_LOGMSGPKG_DIR)/work/stats_httpd_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" stats_httpd.py >$@
 	chmod a+x $@
 
diff --git a/src/bin/stats/b10-stats-httpd.8 b/src/bin/stats/b10-stats-httpd.8
index ed4aafa..1206e1d 100644
--- a/src/bin/stats/b10-stats-httpd.8
+++ b/src/bin/stats/b10-stats-httpd.8
@@ -36,7 +36,7 @@ b10-stats-httpd \- BIND 10 HTTP server for HTTP/XML interface of statistics
 .PP
 
 \fBb10\-stats\-httpd\fR
-is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data from
+is a standalone HTTP server\&. It is intended for HTTP/XML interface for statistics module\&. This server process runs as a process separated from the process of the BIND 10 Stats daemon (\fBb10\-stats\fR)\&. The server is initially executed by the BIND 10 boss process (\fBbind10\fR) and eventually exited by it\&. The server is intended to be server requests by HTTP clients like web browsers and third\-party modules\&. When the server is asked, it requests BIND 10 statistics data or its schema from
 \fBb10\-stats\fR, and it sends the data back in Python dictionary format and the server converts it into XML format\&. The server sends it to the HTTP client\&. The server can send three types of document, which are XML (Extensible Markup Language), XSD (XML Schema definition) and XSL (Extensible Stylesheet Language)\&. The XML document is the statistics data of BIND 10, The XSD document is the data schema of it, and The XSL document is the style sheet to be showed for the web browsers\&. There is different URL for each document\&. But please note that you would be redirected to the URL of XML document if you request the URL of the root document\&. For example, you would be redirected to http://127\&.0\&.0\&.1:8000/bind10/statistics/xml if you request http://127\&.0\&.0\&.1:8000/\&. Please see the manual and the spec file of
 \fBb10\-stats\fR
 for more details about the items of BIND 10 statistics\&. The server uses CC session in communication with
@@ -66,10 +66,6 @@ bindctl(1)\&. Please see the manual of
 bindctl(1)
 about how to configure the settings\&.
 .PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
-.PP
 
 /usr/local/share/bind10\-devel/stats\-httpd\-xml\&.tpl
 \(em the template file of XML document\&.
diff --git a/src/bin/stats/b10-stats-httpd.xml b/src/bin/stats/b10-stats-httpd.xml
index 34c704f..c8df9b8 100644
--- a/src/bin/stats/b10-stats-httpd.xml
+++ b/src/bin/stats/b10-stats-httpd.xml
@@ -57,7 +57,7 @@
       by the BIND 10 boss process (<command>bind10</command>) and eventually
       exited by it.  The server is intended to be server requests by HTTP
       clients like web browsers and third-party modules. When the server is
-      asked, it requests BIND 10 statistics data from
+      asked, it requests BIND 10 statistics data or its schema from
       <command>b10-stats</command>, and it sends the data back in Python
       dictionary format and the server converts it into XML format. The server
       sends it to the HTTP client. The server can send three types of document,
@@ -112,12 +112,6 @@
       of <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum> about
       how to configure the settings.
     </para>
-    <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
-      <!--TODO: The filename should be computed from prefix-->
-      — This is a spec file for data schema of
-      of BIND 10 statistics. This schema cannot be configured 
-      via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
-    </para>
     <para>
       <filename>/usr/local/share/bind10-devel/stats-httpd-xml.tpl</filename>
       <!--TODO: The filename should be computed from prefix-->
@@ -138,7 +132,7 @@
   <refsect1>
     <title>CONFIGURATION AND COMMANDS</title>
     <para>
-      The configurable setting in 
+      The configurable setting in
       <filename>stats-httpd.spec</filename> is:
     </para>
     <variablelist>
diff --git a/src/bin/stats/b10-stats.8 b/src/bin/stats/b10-stats.8
index f69e4d3..0204ca1 100644
--- a/src/bin/stats/b10-stats.8
+++ b/src/bin/stats/b10-stats.8
@@ -1,22 +1,13 @@
 '\" t
 .\"     Title: b10-stats
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
-.\" Generator: DocBook XSL Stylesheets v1.76.1 <http://docbook.sf.net/>
-.\"      Date: Oct 15, 2010
+.\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
+.\"      Date: August 11, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "B10\-STATS" "8" "Oct 15, 2010" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el       .ds Aq '
+.TH "B10\-STATS" "8" "August 11, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -45,9 +36,9 @@ with other modules like
 \fBb10\-auth\fR
 and so on\&. It waits for coming data from other modules, then other modules send data to stats module periodically\&. Other modules send stats data to stats module independently from implementation of stats module, so the frequency of sending data may not be constant\&. Stats module collects data and aggregates it\&.
 \fBb10\-stats\fR
-invokes "sendstats" command for
+invokes an internal command for
 \fBbind10\fR
-after its initial starting because it\*(Aqs sure to collect statistics data from
+after its initial starting because it\'s sure to collect statistics data from
 \fBbind10\fR\&.
 .SH "OPTIONS"
 .PP
@@ -59,6 +50,84 @@ This
 \fBb10\-stats\fR
 switches to verbose mode\&. It sends verbose messages to STDOUT\&.
 .RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The
+\fBb10\-stats\fR
+command does not have any configurable settings\&.
+.PP
+The configuration commands are:
+.PP
+
+
+\fBremove\fR
+removes the named statistics name and data\&.
+.PP
+
+
+\fBreset\fR
+will reset all statistics data to default values except for constant names\&. This may re\-add previously removed statistics names\&.
+.PP
+
+\fBset\fR
+.PP
+
+\fBshow\fR
+will send the statistics data in JSON format\&. By default, it outputs all the statistics data it has collected\&. An optional item name may be specified to receive individual output\&.
+.PP
+
+\fBshutdown\fR
+will shutdown the
+\fBb10\-stats\fR
+process\&. (Note that the
+\fBbind10\fR
+parent may restart it\&.)
+.PP
+
+\fBstatus\fR
+simply indicates that the daemon is running\&.
+.SH "STATISTICS DATA"
+.PP
+The
+\fBb10\-stats\fR
+daemon contains these statistics:
+.PP
+report_time
+.RS 4
+The latest report date and time in ISO 8601 format\&.
+.RE
+.PP
+stats\&.boot_time
+.RS 4
+The date and time when this daemon was started in ISO 8601 format\&. This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.
+.RE
+.PP
+stats\&.last_update_time
+.RS 4
+The date and time (in ISO 8601 format) when this daemon last received data from another component\&.
+.RE
+.PP
+stats\&.lname
+.RS 4
+This is the name used for the
+\fBb10\-msgq\fR
+command\-control channel\&. (This is a constant which can\'t be reset except by restarting
+\fBb10\-stats\fR\&.)
+.RE
+.PP
+stats\&.start_time
+.RS 4
+This is the date and time (in ISO 8601 format) when this daemon started collecting data\&.
+.RE
+.PP
+stats\&.timestamp
+.RS 4
+The current date and time represented in seconds since UNIX epoch (1970\-01\-01T0 0:00:00Z) with precision (delimited with a period) up to one hundred thousandth of second\&.
+.RE
+.PP
+See other manual pages for explanations for their statistics that are kept track by
+\fBb10\-stats\fR\&.
 .SH "FILES"
 .PP
 /usr/local/share/bind10\-devel/stats\&.spec
@@ -66,10 +135,6 @@ switches to verbose mode\&. It sends verbose messages to STDOUT\&.
 \fBb10\-stats\fR\&. It contains commands for
 \fBb10\-stats\fR\&. They can be invoked via
 bindctl(1)\&.
-.PP
-/usr/local/share/bind10\-devel/stats\-schema\&.spec
-\(em This is a spec file for data schema of of BIND 10 statistics\&. This schema cannot be configured via
-bindctl(1)\&.
 .SH "SEE ALSO"
 .PP
 
@@ -82,7 +147,7 @@ BIND 10 Guide\&.
 .PP
 The
 \fBb10\-stats\fR
-daemon was initially designed and implemented by Naoki Kambe of JPRS in Oct 2010\&.
+daemon was initially designed and implemented by Naoki Kambe of JPRS in October 2010\&.
 .SH "COPYRIGHT"
 .br
 Copyright \(co 2010 Internet Systems Consortium, Inc. ("ISC")
diff --git a/src/bin/stats/b10-stats.xml b/src/bin/stats/b10-stats.xml
index f0c472d..13ada7a 100644
--- a/src/bin/stats/b10-stats.xml
+++ b/src/bin/stats/b10-stats.xml
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>Oct 15, 2010</date>
+    <date>August 11, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -64,9 +64,10 @@
       send stats data to stats module independently from
       implementation of stats module, so the frequency of sending data
       may not be constant. Stats module collects data and aggregates
-      it. <command>b10-stats</command> invokes "sendstats" command
+      it. <command>b10-stats</command> invokes an internal command
       for <command>bind10</command> after its initial starting because it's
       sure to collect statistics data from <command>bind10</command>.
+<!-- TODO: reword that last sentence? -->
     </para>
   </refsect1>
 
@@ -87,6 +88,123 @@
   </refsect1>
 
   <refsect1>
+    <title>CONFIGURATION AND COMMANDS</title>
+
+    <para>
+      The <command>b10-stats</command> command does not have any
+      configurable settings.
+    </para>
+
+<!-- TODO: formating -->
+    <para>
+      The configuration commands are:
+    </para>
+
+    <para>
+<!-- TODO: remove is removed in trac930 -->
+      <command>remove</command> removes the named statistics name and data.
+    </para>
+
+    <para>
+<!-- TODO: reset is removed in trac930 -->
+      <command>reset</command> will reset all statistics data to
+      default values except for constant names.
+      This may re-add previously removed statistics names.
+    </para>
+
+    <para>
+      <command>set</command>
+<!-- TODO: document this -->
+    </para>
+
+    <para>
+      <command>show</command> will send the statistics data
+      in JSON format.
+      By default, it outputs all the statistics data it has collected.
+      An optional item name may be specified to receive individual output.
+    </para>
+
+<!-- TODO: document showschema -->
+
+    <para>
+      <command>shutdown</command> will shutdown the
+      <command>b10-stats</command> process.
+      (Note that the <command>bind10</command> parent may restart it.)
+    </para>
+
+    <para>
+      <command>status</command> simply indicates that the daemon is
+      running.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
+    <title>STATISTICS DATA</title>
+
+    <para>
+      The <command>b10-stats</command> daemon contains these statistics:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term>report_time</term>
+<!-- TODO: why not named stats.report_time? -->
+        <listitem><simpara>The latest report date and time in
+          ISO 8601 format.</simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>stats.boot_time</term>
+        <listitem><simpara>The date and time when this daemon was
+          started in ISO 8601 format.
+          This is a constant which can't be reset except by restarting
+          <command>b10-stats</command>.
+        </simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>stats.last_update_time</term>
+        <listitem><simpara>The date and time (in ISO 8601 format)
+          when this daemon last received data from another component.
+        </simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>stats.lname</term>
+        <listitem><simpara>This is the name used for the
+          <command>b10-msgq</command> command-control channel.
+          (This is a constant which can't be reset except by restarting
+          <command>b10-stats</command>.)
+        </simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>stats.start_time</term>
+        <listitem><simpara>This is the date and time (in ISO 8601 format)
+          when this daemon started collecting data.
+        </simpara></listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term>stats.timestamp</term>
+        <listitem><simpara>The current date and time represented in
+          seconds since UNIX epoch (1970-01-01T0 0:00:00Z) with
+          precision (delimited with a period) up to
+          one hundred thousandth of second.</simpara></listitem>
+      </varlistentry>
+
+    </variablelist>
+
+    <para>
+      See other manual pages for explanations for their statistics
+      that are kept track by <command>b10-stats</command>.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
     <title>FILES</title>
     <para><filename>/usr/local/share/bind10-devel/stats.spec</filename>
       <!--TODO: The filename should be computed from prefix-->
@@ -95,12 +213,6 @@
       invoked
       via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
     </para>
-    <para><filename>/usr/local/share/bind10-devel/stats-schema.spec</filename>
-      <!--TODO: The filename should be computed from prefix-->
-      — This is a spec file for data schema of
-      of BIND 10 statistics. This schema cannot be configured 
-      via <refentrytitle>bindctl</refentrytitle><manvolnum>1</manvolnum>.
-    </para>
   </refsect1>
 
   <refsect1>
@@ -126,7 +238,7 @@
     <title>HISTORY</title>
     <para>
       The <command>b10-stats</command> daemon was initially designed
-      and implemented by Naoki Kambe of JPRS in Oct 2010.
+      and implemented by Naoki Kambe of JPRS in October 2010.
     </para>
   </refsect1>
 </refentry><!--
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
index d5846ad..ed91423 100644
--- a/src/bin/stats/stats-httpd-xml.tpl
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -1,24 +1,3 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
-  xmlns:stats="$xsd_namespace"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="$xsd_namespace $xsd_url_path">
-  $xml_string
-</stats:stats_data>
+$xml_string
\ No newline at end of file
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
index 6ad1280..cc5578a 100644
--- a/src/bin/stats/stats-httpd-xsd.tpl
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -1,38 +1,2 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
-  xmlns="http://www.w3.org/2001/XMLSchema"
-  xmlns:stats="$xsd_namespace">
-  <annotation>
-    <documentation xml:lang="en">XML schema of the statistics
-      data in BIND 10</documentation>
-  </annotation>
-  <element name="stats_data">
-    <annotation>
-      <documentation>A set of statistics data</documentation>
-    </annotation>
-    <complexType>
-      $xsd_string
-      <attribute name="version" type="token" use="optional" default="1.0">
-        <annotation>
-          <documentation>Version number of syntax</documentation>
-        </annotation>
-      </attribute>
-    </complexType>
-  </element>
-</schema>
+$xsd_string
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index 01ffdc6..7c2e7ae 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -1,23 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
 <xsl:stylesheet version="1.0"
   xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
-  xmlns:stats="$xsd_namespace">
+  xmlns:bind10="$xsd_namespace">
   <xsl:output method="html" encoding="UTF-8"
     doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
     doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
@@ -42,13 +26,7 @@ td.title {
       </head>
       <body>
         <h1>BIND 10 Statistics</h1>
-        <table>
-          <tr>
-            <th>Title</th>
-            <th>Value</th>
-          </tr>
-          <xsl:apply-templates />
-        </table>
+        <xsl:apply-templates />
       </body>
     </html>
   </xsl:template>
diff --git a/src/bin/stats/stats-schema.spec b/src/bin/stats/stats-schema.spec
deleted file mode 100644
index 37e9c1a..0000000
--- a/src/bin/stats/stats-schema.spec
+++ /dev/null
@@ -1,87 +0,0 @@
-{
-  "module_spec": {
-    "module_name": "Stats",
-    "module_description": "Statistics data schema",
-    "config_data": [
-      {
-        "item_name": "report_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "Report time",
-        "item_description": "A date time when stats module reports",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "bind10.boot_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "bind10.BootTime",
-        "item_description": "A date time when bind10 process starts initially",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.boot_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.BootTime",
-        "item_description": "A date time when the stats module starts initially or when the stats module restarts",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.start_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.StartTime",
-        "item_description": "A date time when the stats module starts collecting data or resetting values last time",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.last_update_time",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "1970-01-01T00:00:00Z",
-        "item_title": "stats.LastUpdateTime",
-        "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
-        "item_format": "date-time"
-      },
-      {
-        "item_name": "stats.timestamp",
-        "item_type": "real",
-        "item_optional": false,
-        "item_default": 0.0,
-        "item_title": "stats.Timestamp",
-        "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)",
-        "item_format": "second"
-      },
-      {
-        "item_name": "stats.lname",
-        "item_type": "string",
-        "item_optional": false,
-        "item_default": "",
-        "item_title": "stats.LocalName",
-        "item_description": "A localname of stats module given via CC protocol"
-      },
-      {
-        "item_name": "auth.queries.tcp",
-        "item_type": "integer",
-        "item_optional": false,
-        "item_default": 0,
-        "item_title": "auth.queries.tcp",
-        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
-      },
-      {
-        "item_name": "auth.queries.udp",
-        "item_type": "integer",
-        "item_optional": false,
-        "item_default": 0,
-        "item_title": "auth.queries.udp",
-        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
-      }
-    ],
-    "commands": []
-  }
-}
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
old mode 100644
new mode 100755
index ce3d9f4..51c4e09
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -15,231 +15,177 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+"""
+Statistics daemon in BIND 10
+
+"""
 import sys; sys.path.append ('@@PYTHONPATH@@')
 import os
-import signal
-import select
 from time import time, strftime, gmtime
 from optparse import OptionParser, OptionValueError
-from collections import defaultdict
-from isc.config.ccsession import ModuleCCSession, create_answer
-from isc.cc import Session, SessionError
 
+import isc
+import isc.util.process
 import isc.log
-from stats_messages import *
+from isc.log_messages.stats_messages import *
 
 isc.log.init("b10-stats")
 logger = isc.log.Logger("stats")
 
-# Some constants for debug levels, these should be removed when we
-# have #1074
-DBG_STATS_MESSAGING = 30
+# Some constants for debug levels.
+DBG_STATS_MESSAGING = logger.DBGLVL_COMMAND
+
+# This is for boot_time of Stats
+_BASETIME = gmtime()
 
 # for setproctitle
-import isc.util.process
 isc.util.process.rename()
 
 # If B10_FROM_SOURCE is set in the environment, we use data files
 # from a directory relative to that, otherwise we use the ones
 # installed on the system
 if "B10_FROM_SOURCE" in os.environ:
-    BASE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
-        "src" + os.sep + "bin" + os.sep + "stats"
+    SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] + os.sep + \
+        "src" + os.sep + "bin" + os.sep + "stats" + os.sep + "stats.spec"
 else:
     PREFIX = "@prefix@"
     DATAROOTDIR = "@datarootdir@"
-    BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
-    BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
-SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
+    SPECFILE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@" + os.sep + "stats.spec"
+    SPECFILE_LOCATION = SPECFILE_LOCATION.replace("${datarootdir}", DATAROOTDIR)\
+        .replace("${prefix}", PREFIX)
 
-class Singleton(type):
+def get_timestamp():
     """
-    A abstract class of singleton pattern
+    get current timestamp
     """
-    # Because of singleton pattern: 
-    #   At the beginning of coding, one UNIX domain socket is needed
-    #  for config manager, another socket is needed for stats module,
-    #  then stats module might need two sockets. So I adopted the
-    #  singleton pattern because I avoid creating multiple sockets in
-    #  one stats module. But in the initial version stats module
-    #  reports only via bindctl, so just one socket is needed. To use
-    #  the singleton pattern is not important now. :(
+    return time()
 
-    def __init__(self, *args, **kwargs):
-        type.__init__(self, *args, **kwargs)
-        self._instances = {}
+def get_datetime(gmt=None):
+    """
+    get current datetime
+    """
+    if not gmt: gmt = gmtime()
+    return strftime("%Y-%m-%dT%H:%M:%SZ", gmt)
 
-    def __call__(self, *args, **kwargs):
-        if args not in self._instances:
-            self._instances[args]={}
-        kw = tuple(kwargs.items())
-        if  kw not in self._instances[args]:
-            self._instances[args][kw] = type.__call__(self, *args, **kwargs)
-        return self._instances[args][kw]
+def get_spec_defaults(spec):
+    """
+    extracts the default values of the items from spec specified in
+    arg, and returns the dict-type variable which is a set of the item
+    names and the default values
+    """
+    if type(spec) is not list: return {}
+    def _get_spec_defaults(spec):
+        item_type = spec['item_type']
+        if item_type == "integer":
+            return int(spec.get('item_default', 0))
+        elif item_type == "real":
+            return float(spec.get('item_default', 0.0))
+        elif item_type == "boolean":
+            return bool(spec.get('item_default', False))
+        elif item_type == "string":
+            return str(spec.get('item_default', ""))
+        elif item_type == "list":
+            return spec.get(
+                    "item_default",
+                    [ _get_spec_defaults(spec["list_item_spec"]) ])
+        elif item_type == "map":
+            return spec.get(
+                    "item_default",
+                    dict([ (s["item_name"], _get_spec_defaults(s)) for s in spec["map_item_spec"] ]) )
+        else:
+            return spec.get("item_default", None)
+    return dict([ (s['item_name'], _get_spec_defaults(s)) for s in spec ])
 
 class Callback():
     """
     A Callback handler class
     """
-    def __init__(self, name=None, callback=None, args=(), kwargs={}):
-        self.name = name
-        self.callback = callback
+    def __init__(self, command=None, args=(), kwargs={}):
+        self.command = command
         self.args = args
         self.kwargs = kwargs
 
     def __call__(self, *args, **kwargs):
-        if not args:
-            args = self.args
-        if not kwargs:
-            kwargs = self.kwargs
-        if self.callback:
-            return self.callback(*args, **kwargs)
-
-class Subject():
-    """
-    A abstract subject class of observer pattern
-    """
-    # Because of observer pattern:
-    #   In the initial release, I'm also sure that observer pattern
-    #  isn't definitely needed because the interface between gathering
-    #  and reporting statistics data is single.  However in the future
-    #  release, the interfaces may be multiple, that is, multiple
-    #  listeners may be needed. For example, one interface, which
-    #  stats module has, is for between ''config manager'' and stats
-    #  module, another interface is for between ''HTTP server'' and
-    #  stats module, and one more interface is for between ''SNMP
-    #  server'' and stats module. So by considering that stats module
-    #  needs multiple interfaces in the future release, I adopted the
-    #  observer pattern in stats module. But I don't have concrete
-    #  ideas in case of multiple listener currently.
-
-    def __init__(self):
-        self._listeners = []
-
-    def attach(self, listener):
-        if not listener in self._listeners:
-            self._listeners.append(listener)
-
-    def detach(self, listener):
-        try:
-            self._listeners.remove(listener)
-        except ValueError:
-            pass
+        if not args: args = self.args
+        if not kwargs: kwargs = self.kwargs
+        if self.command: return self.command(*args, **kwargs)
 
-    def notify(self, event, modifier=None):
-        for listener in self._listeners:
-            if modifier != listener:
-                listener.update(event)
+class StatsError(Exception):
+    """Exception class for Stats class"""
+    pass
 
-class Listener():
+class Stats:
     """
-    A abstract listener class of observer pattern
+    Main class of stats module
     """
-    def __init__(self, subject):
-        self.subject = subject
-        self.subject.attach(self)
-        self.events = {}
-
-    def update(self, name):
-        if name in self.events:
-            callback = self.events[name]
-            return callback()
-
-    def add_event(self, event):
-        self.events[event.name]=event
-
-class SessionSubject(Subject, metaclass=Singleton):
-    """
-    A concrete subject class which creates CC session object
-    """
-    def __init__(self, session=None):
-        Subject.__init__(self)
-        self.session=session
-        self.running = False
-
-    def start(self):
-        self.running = True
-        self.notify('start')
-
-    def stop(self):
+    def __init__(self):
         self.running = False
-        self.notify('stop')
-
-    def check(self):
-        self.notify('check')
-
-class CCSessionListener(Listener):
-    """
-    A concrete listener class which creates SessionSubject object and
-    ModuleCCSession object
-    """
-    def __init__(self, subject):
-        Listener.__init__(self, subject)
-        self.session = subject.session
-        self.boot_time = get_datetime()
-
         # create ModuleCCSession object
-        self.cc_session = ModuleCCSession(SPECFILE_LOCATION,
-                                          self.config_handler,
-                                          self.command_handler,
-                                          self.session)
-
-        self.session = self.subject.session = self.cc_session._session
-
-        # initialize internal data
-        self.stats_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION).get_config_spec()
-        self.stats_data = self.initialize_data(self.stats_spec)
-
-        # add event handler invoked via SessionSubject object
-        self.add_event(Callback('start', self.start))
-        self.add_event(Callback('stop', self.stop))
-        self.add_event(Callback('check', self.check))
-        # don't add 'command_' suffix to the special commands in
-        # order to prevent executing internal command via bindctl
-
+        self.mccs = isc.config.ModuleCCSession(SPECFILE_LOCATION,
+                                               self.config_handler,
+                                               self.command_handler)
+        self.cc_session = self.mccs._session
+        # get module spec
+        self.module_name = self.mccs.get_module_spec().get_module_name()
+        self.modules = {}
+        self.statistics_data = {}
         # get commands spec
-        self.commands_spec = self.cc_session.get_module_spec().get_commands_spec()
-
+        self.commands_spec = self.mccs.get_module_spec().get_commands_spec()
         # add event handler related command_handler of ModuleCCSession
-        # invoked via bindctl
+        self.callbacks = {}
         for cmd in self.commands_spec:
+            # add prefix "command_"
+            name = "command_" + cmd["command_name"]
             try:
-                # add prefix "command_"
-                name = "command_" + cmd["command_name"]
                 callback = getattr(self, name)
-                kwargs = self.initialize_data(cmd["command_args"])
-                self.add_event(Callback(name=name, callback=callback, args=(), kwargs=kwargs))
-            except AttributeError as ae:
-                logger.error(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+                kwargs = get_spec_defaults(cmd["command_args"])
+                self.callbacks[name] = Callback(command=callback, kwargs=kwargs)
+            except AttributeError:
+                raise StatsError(STATS_UNKNOWN_COMMAND_IN_SPEC, cmd["command_name"])
+        self.mccs.start()
 
     def start(self):
         """
-        start the cc chanel
+        Start stats module
         """
-        # set initial value
-        self.stats_data['stats.boot_time'] = self.boot_time
-        self.stats_data['stats.start_time'] = get_datetime()
-        self.stats_data['stats.last_update_time'] = get_datetime()
-        self.stats_data['stats.lname'] = self.session.lname
-        self.cc_session.start()
+        self.running = True
+        logger.info(STATS_STARTING)
+
         # request Bob to send statistics data
         logger.debug(DBG_STATS_MESSAGING, STATS_SEND_REQUEST_BOSS)
-        cmd = isc.config.ccsession.create_command("sendstats", None)
-        seq = self.session.group_sendmsg(cmd, 'Boss')
-        self.session.group_recvmsg(True, seq)
+        cmd = isc.config.ccsession.create_command("getstats", None)
+        seq = self.cc_session.group_sendmsg(cmd, 'Boss')
+        try:
+            answer, env = self.cc_session.group_recvmsg(False, seq)
+            if answer:
+                rcode, args = isc.config.ccsession.parse_answer(answer)
+                if rcode == 0:
+                    errors = self.update_statistics_data(
+                        args["owner"], **args["data"])
+                    if errors:
+                        raise StatsError("boss spec file is incorrect: "
+                                         + ", ".join(errors))
+                    errors = self.update_statistics_data(
+                                self.module_name,
+                                last_update_time=get_datetime())
+                    if errors:
+                        raise StatsError("stats spec file is incorrect: "
+                                         + ", ".join(errors))
+        except isc.cc.session.SessionTimeout:
+            pass
 
-    def stop(self):
-        """
-        stop the cc chanel
-        """
-        return self.cc_session.close()
+        # initialized Statistics data
+        errors = self.update_statistics_data(
+            self.module_name,
+            lname=self.cc_session.lname,
+            boot_time=get_datetime(_BASETIME)
+            )
+        if errors:
+            raise StatsError("stats spec file is incorrect: "
+                             + ", ".join(errors))
 
-    def check(self):
-        """
-        check the cc chanel
-        """
-        return self.cc_session.check_command(False)
+        while self.running:
+            self.mccs.check_command(False)
 
     def config_handler(self, new_config):
         """
@@ -247,174 +193,222 @@ class CCSessionListener(Listener):
         """
         logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_NEW_CONFIG,
                      new_config)
-
         # do nothing currently
-        return create_answer(0)
+        return isc.config.create_answer(0)
 
-    def command_handler(self, command, *args, **kwargs):
+    def command_handler(self, command, kwargs):
         """
         handle commands from the cc channel
         """
-        # add 'command_' suffix in order to executing command via bindctl
         name = 'command_' + command
-        
-        if name in self.events:
-            event = self.events[name]
-            return event(*args, **kwargs)
+        if name in self.callbacks:
+            callback = self.callbacks[name]
+            if kwargs:
+                return callback(**kwargs)
+            else:
+                return callback()
         else:
-            return self.command_unknown(command, args)
+            logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
+            return isc.config.create_answer(1, "Unknown command: '"+str(command)+"'")
 
-    def command_shutdown(self, args):
+    def update_modules(self):
         """
-        handle shutdown command
+        updates information of each module. This method gets each
+        module's information from the config manager and sets it into
+        self.modules. If its getting from the config manager fails, it
+        raises StatsError.
         """
-        logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
-        self.subject.running = False
-        return create_answer(0)
+        modules = {}
+        seq = self.cc_session.group_sendmsg(
+            isc.config.ccsession.create_command(
+                isc.config.ccsession.COMMAND_GET_STATISTICS_SPEC),
+            'ConfigManager')
+        (answer, env) = self.cc_session.group_recvmsg(False, seq)
+        if answer:
+            (rcode, value) = isc.config.ccsession.parse_answer(answer)
+            if rcode == 0:
+                for mod in value:
+                    spec = { "module_name" : mod }
+                    if value[mod] and type(value[mod]) is list:
+                        spec["statistics"] = value[mod]
+                    modules[mod] = isc.config.module_spec.ModuleSpec(spec)
+            else:
+                raise StatsError("Updating module spec fails: " + str(value))
+        modules[self.module_name] = self.mccs.get_module_spec()
+        self.modules = modules
 
-    def command_set(self, args, stats_data={}):
+    def get_statistics_data(self, owner=None, name=None):
         """
-        handle set command
+        returns statistics data which stats module has of each
+        module. If it can't find specified statistics data, it raises
+        StatsError.
         """
-        # 'args' must be dictionary type
-        self.stats_data.update(args['stats_data'])
-
-        # overwrite "stats.LastUpdateTime"
-        self.stats_data['stats.last_update_time'] = get_datetime()
-
-        return create_answer(0)
+        self.update_statistics_data()
+        if owner and name:
+            try:
+                return {owner:{name:self.statistics_data[owner][name]}}
+            except KeyError:
+                pass
+        elif owner:
+            try:
+                return {owner: self.statistics_data[owner]}
+            except KeyError:
+                pass
+        elif name:
+            pass
+        else:
+            return self.statistics_data
+        raise StatsError("No statistics data found: "
+                         + "owner: " + str(owner) + ", "
+                         + "name: " + str(name))
 
-    def command_remove(self, args, stats_item_name=''):
+    def update_statistics_data(self, owner=None, **data):
         """
-        handle remove command
+        change statistics data of specified module into specified
+        data. It updates information of each module first, and it
+        updates statistics data. If specified data is invalid for
+        statistics spec of specified owner, it returns a list of error
+        messages. If there is no error or if neither owner nor data is
+        specified in args, it returns None.
         """
-
-        # 'args' must be dictionary type
-        if args and args['stats_item_name'] in self.stats_data:
-            stats_item_name = args['stats_item_name']
-
-        logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_REMOVE_COMMAND,
-                     stats_item_name)
-
-        # just remove one item
-        self.stats_data.pop(stats_item_name)
-
-        return create_answer(0)
-
-    def command_show(self, args, stats_item_name=''):
+        self.update_modules()
+        statistics_data = {}
+        for (name, module) in self.modules.items():
+            value = get_spec_defaults(module.get_statistics_spec())
+            if module.validate_statistics(True, value):
+                statistics_data[name] = value
+        for (name, value) in self.statistics_data.items():
+            if name in statistics_data:
+                statistics_data[name].update(value)
+            else:
+                statistics_data[name] = value
+        self.statistics_data = statistics_data
+        if owner and data:
+            errors = []
+            try:
+                if self.modules[owner].validate_statistics(False, data, errors):
+                    self.statistics_data[owner].update(data)
+                    return
+            except KeyError:
+                errors.append("unknown module name: " + str(owner))
+            return errors
+
+    def command_status(self):
         """
-        handle show command
+        handle status command
         """
+        logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
+        return isc.config.create_answer(
+            0, "Stats is up. (PID " + str(os.getpid()) + ")")
 
-        # always overwrite 'report_time' and 'stats.timestamp'
-        # if "show" command invoked
-        self.stats_data['report_time'] = get_datetime()
-        self.stats_data['stats.timestamp'] = get_timestamp()
-
-        # if with args
-        if args and args['stats_item_name'] in self.stats_data:
-            stats_item_name = args['stats_item_name']
-            logger.debug(DBG_STATS_MESSAGING,
-                         STATS_RECEIVED_SHOW_NAME_COMMAND,
-                         stats_item_name)
-            return create_answer(0, {stats_item_name: self.stats_data[stats_item_name]})
-
-        logger.debug(DBG_STATS_MESSAGING,
-                     STATS_RECEIVED_SHOW_ALL_COMMAND)
-        return create_answer(0, self.stats_data)
-
-    def command_reset(self, args):
+    def command_shutdown(self):
         """
-        handle reset command
+        handle shutdown command
         """
-        logger.debug(DBG_STATS_MESSAGING,
-                     STATS_RECEIVED_RESET_COMMAND)
-
-        # re-initialize internal variables
-        self.stats_data = self.initialize_data(self.stats_spec)
-
-        # reset initial value
-        self.stats_data['stats.boot_time'] = self.boot_time
-        self.stats_data['stats.start_time'] = get_datetime()
-        self.stats_data['stats.last_update_time'] = get_datetime()
-        self.stats_data['stats.lname'] = self.session.lname
-
-        return create_answer(0)
+        logger.info(STATS_RECEIVED_SHUTDOWN_COMMAND)
+        self.running = False
+        return isc.config.create_answer(0)
 
-    def command_status(self, args):
+    def command_show(self, owner=None, name=None):
         """
-        handle status command
+        handle show command
         """
-        logger.debug(DBG_STATS_MESSAGING, STATS_RECEIVED_STATUS_COMMAND)
-        # just return "I'm alive."
-        return create_answer(0, "I'm alive.")
-
-    def command_unknown(self, command, args):
+        if owner or name:
+            logger.debug(DBG_STATS_MESSAGING,
+                         STATS_RECEIVED_SHOW_NAME_COMMAND,
+                         str(owner)+", "+str(name))
+        else:
+            logger.debug(DBG_STATS_MESSAGING,
+                         STATS_RECEIVED_SHOW_ALL_COMMAND)
+        errors = self.update_statistics_data(
+            self.module_name,
+            timestamp=get_timestamp(),
+            report_time=get_datetime()
+            )
+        if errors:
+            raise StatsError("stats spec file is incorrect: "
+                             + ", ".join(errors))
+        try:
+            return isc.config.create_answer(
+                0, self.get_statistics_data(owner, name))
+        except StatsError:
+            return isc.config.create_answer(
+                1, "specified arguments are incorrect: " \
+                    + "owner: " + str(owner) + ", name: " + str(name))
+
+    def command_showschema(self, owner=None, name=None):
         """
-        handle an unknown command
+        handle showschema command
         """
-        logger.error(STATS_RECEIVED_UNKNOWN_COMMAND, command)
-        return create_answer(1, "Unknown command: '"+str(command)+"'")
-
+        if owner or name:
+            logger.debug(DBG_STATS_MESSAGING,
+                         STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND,
+                         str(owner)+", "+str(name))
+        else:
+            logger.debug(DBG_STATS_MESSAGING,
+                         STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND)
+        self.update_modules()
+        schema = {}
+        schema_byname = {}
+        for mod in self.modules:
+            spec = self.modules[mod].get_statistics_spec()
+            schema_byname[mod] = {}
+            if spec:
+                schema[mod] = spec
+                for item in spec:
+                    schema_byname[mod][item['item_name']] = item
+        if owner:
+            try:
+                if name:
+                    return isc.config.create_answer(0, {owner:[schema_byname[owner][name]]})
+                else:
+                    return isc.config.create_answer(0, {owner:schema[owner]})
+            except KeyError:
+                pass
+        else:
+            if name:
+                return isc.config.create_answer(1, "module name is not specified")
+            else:
+                return isc.config.create_answer(0, schema)
+        return isc.config.create_answer(
+                1, "specified arguments are incorrect: " \
+                    + "owner: " + str(owner) + ", name: " + str(name))
 
-    def initialize_data(self, spec):
+    def command_set(self, owner, data):
         """
-        initialize stats data
+        handle set command
         """
-        def __get_init_val(spec):
-            if spec['item_type'] == 'null':
-                return None
-            elif spec['item_type'] == 'boolean':
-                return bool(spec.get('item_default', False))
-            elif spec['item_type'] == 'string':
-                return str(spec.get('item_default', ''))
-            elif spec['item_type'] in set(['number', 'integer']):
-                return int(spec.get('item_default', 0))
-            elif spec['item_type'] in set(['float', 'double', 'real']):
-                return float(spec.get('item_default', 0.0))
-            elif spec['item_type'] in set(['list', 'array']):
-                return spec.get('item_default',
-                                [ __get_init_val(s) for s in spec['list_item_spec'] ])
-            elif spec['item_type'] in set(['map', 'object']):
-                return spec.get('item_default',
-                                dict([ (s['item_name'], __get_init_val(s)) for s in spec['map_item_spec'] ]) )
-            else:
-                return spec.get('item_default')
-        return dict([ (s['item_name'], __get_init_val(s)) for s in spec ])
+        errors = self.update_statistics_data(owner, **data)
+        if errors:
+            return isc.config.create_answer(
+                1, "errors while setting statistics data: " \
+                    + ", ".join(errors))
+        errors = self.update_statistics_data(
+            self.module_name, last_update_time=get_datetime() )
+        if errors:
+            raise StatsError("stats spec file is incorrect: "
+                             + ", ".join(errors))
+        return isc.config.create_answer(0)
 
-def get_timestamp():
-    """
-    get current timestamp
-    """
-    return time()
-
-def get_datetime():
-    """
-    get current datetime
-    """
-    return strftime("%Y-%m-%dT%H:%M:%SZ", gmtime())
-
-def main(session=None):
+if __name__ == "__main__":
     try:
         parser = OptionParser()
-        parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
-                      help="display more about what is going on")
+        parser.add_option(
+            "-v", "--verbose", dest="verbose", action="store_true",
+            help="display more about what is going on")
         (options, args) = parser.parse_args()
         if options.verbose:
             isc.log.init("b10-stats", "DEBUG", 99)
-        subject = SessionSubject(session=session)
-        listener = CCSessionListener(subject)
-        subject.start()
-        while subject.running:
-            subject.check()
-        subject.stop()
-
+        stats = Stats()
+        stats.start()
     except OptionValueError as ove:
         logger.fatal(STATS_BAD_OPTION_VALUE, ove)
-    except SessionError as se:
+        sys.exit(1)
+    except isc.cc.session.SessionError as se:
         logger.fatal(STATS_CC_SESSION_ERROR, se)
+        sys.exit(1)
+    except StatsError as se:
+        logger.fatal(STATS_START_ERROR, se)
+        sys.exit(1)
     except KeyboardInterrupt as kie:
         logger.info(STATS_STOPPED_BY_KEYBOARD)
-
-if __name__ == "__main__":
-    main()
diff --git a/src/bin/stats/stats.spec b/src/bin/stats/stats.spec
index 25f6b54..e716b62 100644
--- a/src/bin/stats/stats.spec
+++ b/src/bin/stats/stats.spec
@@ -6,55 +6,119 @@
     "commands": [
       {
         "command_name": "status",
-        "command_description": "identify whether stats module is alive or not",
+        "command_description": "Show status of the stats daemon",
+        "command_args": []
+      },
+      {
+        "command_name": "shutdown",
+        "command_description": "Shut down the stats module",
         "command_args": []
       },
       {
         "command_name": "show",
-        "command_description": "show the specified/all statistics data",
+        "command_description": "Show the specified/all statistics data",
         "command_args": [
           {
-            "item_name": "stats_item_name",
+            "item_name": "owner",
             "item_type": "string",
             "item_optional": true,
-            "item_default": ""
+            "item_default": "",
+            "item_description": "module name of the owner of the statistics data"
+          },
+	  {
+	    "item_name": "name",
+            "item_type": "string",
+            "item_optional": true,
+            "item_default": "",
+            "item_description": "statistics item name of the owner"
           }
         ]
       },
       {
-        "command_name": "set",
-        "command_description": "set the value of specified name in statistics data",
+        "command_name": "showschema",
+        "command_description": "Show the specified/all statistics schema",
         "command_args": [
           {
-            "item_name": "stats_data",
-            "item_type": "map",
-            "item_optional": false,
-            "item_default": {},
-            "map_item_spec": []
+            "item_name": "owner",
+            "item_type": "string",
+            "item_optional": true,
+            "item_default": "",
+            "item_description": "module name of the owner of the statistics data"
+          },
+	  {
+	    "item_name": "name",
+            "item_type": "string",
+            "item_optional": true,
+            "item_default": "",
+            "item_description": "statistics item name of the owner"
           }
         ]
       },
       {
-        "command_name": "remove",
-        "command_description": "remove the specified name from statistics data",
+        "command_name": "set",
+        "command_description": "set the value of specified name in statistics data",
         "command_args": [
           {
-            "item_name": "stats_item_name",
+            "item_name": "owner",
             "item_type": "string",
             "item_optional": false,
-            "item_default": ""
+            "item_default": "",
+            "item_description": "module name of the owner of the statistics data"
+          },
+	  {
+	    "item_name": "data",
+            "item_type": "map",
+            "item_optional": false,
+            "item_default": {},
+            "item_description": "statistics data set of the owner",
+            "map_item_spec": []
           }
         ]
+      }
+    ],
+    "statistics": [
+      {
+        "item_name": "report_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Report time",
+        "item_description": "A date time when stats module reports",
+        "item_format": "date-time"
       },
       {
-        "command_name": "reset",
-        "command_description": "reset all statistics data to default values except for several constant names",
-        "command_args": []
+        "item_name": "boot_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Boot time",
+        "item_description": "A date time when the stats module starts initially or when the stats module restarts",
+        "item_format": "date-time"
       },
       {
-        "command_name": "shutdown",
-        "command_description": "Shut down the stats module",
-        "command_args": []
+        "item_name": "last_update_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Last update time",
+        "item_description": "The latest date time when the stats module receives from other modules like auth server or boss process and so on",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "timestamp",
+        "item_type": "real",
+        "item_optional": false,
+        "item_default": 0.0,
+        "item_title": "Timestamp",
+        "item_description": "A current time stamp since epoch time (1970-01-01T00:00:00Z)"
+      },
+      {
+        "item_name": "lname",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "",
+        "item_title": "Local Name",
+        "item_description": "A localname of stats module given via CC protocol"
       }
     ]
   }
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
old mode 100755
new mode 100644
index 74298cf..f265abb
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -29,21 +29,21 @@ import http.server
 import socket
 import string
 import xml.etree.ElementTree
+import urllib.parse
 
 import isc.cc
 import isc.config
 import isc.util.process
 
 import isc.log
-from stats_httpd_messages import *
+from isc.log_messages.stats_httpd_messages import *
 
 isc.log.init("b10-stats-httpd")
 logger = isc.log.Logger("stats-httpd")
 
-# Some constants for debug levels, these should be removed when we
-# have #1074
-DBG_STATHTTPD_INIT = 10
-DBG_STATHTTPD_MESSAGING = 30
+# Some constants for debug levels.
+DBG_STATHTTPD_INIT = logger.DBGLVL_START_SHUT
+DBG_STATHTTPD_MESSAGING = logger.DBGLVL_COMMAND
 
 # If B10_FROM_SOURCE is set in the environment, we use data files
 # from a directory relative to that, otherwise we use the ones
@@ -57,7 +57,6 @@ else:
     BASE_LOCATION = "@datadir@" + os.sep + "@PACKAGE@"
     BASE_LOCATION = BASE_LOCATION.replace("${datarootdir}", DATAROOTDIR).replace("${prefix}", PREFIX)
 SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd.spec"
-SCHEMA_SPECFILE_LOCATION = BASE_LOCATION + os.sep + "stats-schema.spec"
 XML_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xml.tpl"
 XSD_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsd.tpl"
 XSL_TEMPLATE_LOCATION = BASE_LOCATION + os.sep + "stats-httpd-xsl.tpl"
@@ -68,8 +67,7 @@ XML_URL_PATH = '/bind10/statistics/xml'
 XSD_URL_PATH = '/bind10/statistics/xsd'
 XSL_URL_PATH = '/bind10/statistics/xsl'
 # TODO: This should be considered later.
-XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
-DEFAULT_CONFIG = dict(listen_on=[('127.0.0.1', 8000)])
+XSD_NAMESPACE = 'http://bind10.isc.org/bind10'
 
 # Assign this process name
 isc.util.process.rename()
@@ -88,14 +86,29 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
 
     def send_head(self):
         try:
-            if self.path == XML_URL_PATH:
-                body = self.server.xml_handler()
-            elif self.path == XSD_URL_PATH:
-                body = self.server.xsd_handler()
-            elif self.path == XSL_URL_PATH:
-                body = self.server.xsl_handler()
+            req_path = self.path
+            req_path = urllib.parse.urlsplit(req_path).path
+            req_path = urllib.parse.unquote(req_path)
+            req_path = os.path.normpath(req_path)
+            path_dirs = req_path.split('/')
+            path_dirs = [ d for d in filter(None, path_dirs) ]
+            req_path = '/'+"/".join(path_dirs)
+            module_name = None
+            item_name = None
+            # in case of /bind10/statistics/xxx/YYY/zzz
+            if len(path_dirs) >= 5:
+                item_name = path_dirs[4]
+            # in case of /bind10/statistics/xxx/YYY ...
+            if len(path_dirs) >= 4:
+                module_name = path_dirs[3]
+            if req_path == '/'.join([XML_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xml_handler(module_name, item_name)
+            elif req_path == '/'.join([XSD_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xsd_handler(module_name, item_name)
+            elif req_path == '/'.join([XSL_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xsl_handler(module_name, item_name)
             else:
-                if self.path == '/' and 'Host' in self.headers.keys():
+                if req_path == '/' and 'Host' in self.headers.keys():
                     # redirect to XML URL only when requested with '/'
                     self.send_response(302)
                     self.send_header(
@@ -107,6 +120,12 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
                     # Couldn't find HOST
                     self.send_error(404)
                     return None
+        except StatsHttpdDataError as err:
+            # Could not find either the specified module name or the
+            # specified item name
+            self.send_error(404)
+            logger.error(STATHTTPD_SERVER_DATAERROR, err)
+            return None
         except StatsHttpdError as err:
             self.send_error(500)
             logger.error(STATHTTPD_SERVER_ERROR, err)
@@ -148,6 +167,12 @@ class StatsHttpdError(Exception):
     main routine."""
     pass
 
+class StatsHttpdDataError(Exception):
+    """Exception class for StatsHttpd class. The reason seems to be
+    due to the data. It is intended to be thrown from the
+    StatsHttpd object to the HttpHandler object or main routine."""
+    pass
+
 class StatsHttpd:
     """The main class of HTTP server of HTTP/XML interface for
     statistics module. It handles HTTP requests, and command channel
@@ -160,8 +185,10 @@ class StatsHttpd:
         self.mccs = None
         self.httpd = []
         self.open_mccs()
+        self.config = {}
         self.load_config()
-        self.load_templates()
+        self.http_addrs = []
+        self.mccs.start()
         self.open_httpd()
 
     def open_mccs(self):
@@ -171,10 +198,6 @@ class StatsHttpd:
         self.mccs = isc.config.ModuleCCSession(
             SPECFILE_LOCATION, self.config_handler, self.command_handler)
         self.cc_session = self.mccs._session
-        # read spec file of stats module and subscribe 'Stats'
-        self.stats_module_spec = isc.config.module_spec_from_file(SCHEMA_SPECFILE_LOCATION)
-        self.stats_config_spec = self.stats_module_spec.get_config_spec()
-        self.stats_module_name = self.stats_module_spec.get_module_name()
 
     def close_mccs(self):
         """Closes a ModuleCCSession object"""
@@ -189,18 +212,19 @@ class StatsHttpd:
         """Loads configuration from spec file or new configuration
         from the config manager"""
         # load config
-        if len(new_config) > 0:
-            self.config.update(new_config)
-        else:
-            self.config = DEFAULT_CONFIG
-            self.config.update(
-                dict([
-                        (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
-                        for itm in self.mccs.get_module_spec().get_config_spec()
-                        ])
-                )
+        if len(self.config) == 0:
+            self.config = dict([
+                (itm['item_name'], self.mccs.get_value(itm['item_name'])[0])
+                for itm in self.mccs.get_module_spec().get_config_spec()
+                ])
+        self.config.update(new_config)
         # set addresses and ports for HTTP
-        self.http_addrs = [ (cf['address'], cf['port']) for cf in self.config['listen_on'] ]
+        addrs = []
+        if 'listen_on' in self.config:
+            for cf in self.config['listen_on']:
+                if 'address' in cf and 'port' in cf:
+                    addrs.append((cf['address'], cf['port']))
+        self.http_addrs = addrs
 
     def open_httpd(self):
         """Opens sockets for HTTP. Iterating each HTTP address to be
@@ -208,46 +232,44 @@ class StatsHttpd:
         for addr in self.http_addrs:
             self.httpd.append(self._open_httpd(addr))
 
-    def _open_httpd(self, server_address, address_family=None):
+    def _open_httpd(self, server_address):
+        httpd = None
         try:
-            # try IPv6 at first
-            if address_family is not None:
-                HttpServer.address_family = address_family
-            elif socket.has_ipv6:
-                HttpServer.address_family = socket.AF_INET6
+            # get address family for the server_address before
+            # creating HttpServer object. If a specified address is
+            # not numerical, gaierror may be thrown.
+            address_family = socket.getaddrinfo(
+                server_address[0], server_address[1], 0,
+                socket.SOCK_STREAM, socket.IPPROTO_TCP, socket.AI_NUMERICHOST
+                )[0][0]
+            HttpServer.address_family = address_family
             httpd = HttpServer(
                 server_address, HttpHandler,
                 self.xml_handler, self.xsd_handler, self.xsl_handler,
                 self.write_log)
-        except (socket.gaierror, socket.error,
-                OverflowError, TypeError) as err:
-            # try IPv4 next
-            if HttpServer.address_family == socket.AF_INET6:
-                httpd = self._open_httpd(server_address, socket.AF_INET)
-            else:
-                raise HttpServerError(
-                    "Invalid address %s, port %s: %s: %s" %
-                    (server_address[0], server_address[1],
-                     err.__class__.__name__, err))
-        else:
             logger.info(STATHTTPD_STARTED, server_address[0],
                         server_address[1])
-        return httpd
+            return httpd
+        except (socket.gaierror, socket.error,
+                OverflowError, TypeError) as err:
+           if httpd:
+                httpd.server_close()
+           raise HttpServerError(
+               "Invalid address %s, port %s: %s: %s" %
+               (server_address[0], server_address[1],
+                err.__class__.__name__, err))
 
     def close_httpd(self):
         """Closes sockets for HTTP"""
-        if len(self.httpd) == 0:
-            return
-        for ht in self.httpd:
+        while len(self.httpd)>0:
+            ht = self.httpd.pop()
             logger.info(STATHTTPD_CLOSING, ht.server_address[0],
                         ht.server_address[1])
             ht.server_close()
-        self.httpd = []
 
     def start(self):
         """Starts StatsHttpd objects to run. Waiting for client
         requests by using select.select functions"""
-        self.mccs.start()
         self.running = True
         while self.running:
             try:
@@ -280,6 +302,7 @@ class StatsHttpd:
         logger.info(STATHTTPD_SHUTDOWN)
         self.close_httpd()
         self.close_mccs()
+        self.running = False
 
     def get_sockets(self):
         """Returns sockets to select.select"""
@@ -296,23 +319,27 @@ class StatsHttpd:
         addresses and ports to listen HTTP requests on."""
         logger.debug(DBG_STATHTTPD_MESSAGING, STATHTTPD_HANDLE_CONFIG,
                    new_config)
-        for key in new_config.keys():
-            if key not in DEFAULT_CONFIG and key != "version":
-                logger.error(STATHTTPD_UNKNOWN_CONFIG_ITEM, key)
+        errors = []
+        if not self.mccs.get_module_spec().\
+                validate_config(False, new_config, errors):
                 return isc.config.ccsession.create_answer(
-                    1, "Unknown known config: %s" % key)
+                    1, ", ".join(errors))
         # backup old config
         old_config = self.config.copy()
-        self.close_httpd()
         self.load_config(new_config)
+        # If the http sockets aren't opened or
+        # if new_config doesn't have'listen_on', it returns
+        if len(self.httpd) == 0 or 'listen_on' not in new_config:
+            return isc.config.ccsession.create_answer(0)
+        self.close_httpd()
         try:
             self.open_httpd()
         except HttpServerError as err:
             logger.error(STATHTTPD_SERVER_ERROR, err)
             # restore old config
-            self.config_handler(old_config)
-            return isc.config.ccsession.create_answer(
-                1, "[b10-stats-httpd] %s" % err)
+            self.load_config(old_config)
+            self.open_httpd()
+            return isc.config.ccsession.create_answer(1, str(err))
         else:
             return isc.config.ccsession.create_answer(0)
 
@@ -328,21 +355,34 @@ class StatsHttpd:
             logger.debug(DBG_STATHTTPD_MESSAGING,
                          STATHTTPD_RECEIVED_SHUTDOWN_COMMAND)
             self.running = False
-            return isc.config.ccsession.create_answer(
-                0, "Stats Httpd is shutting down.")
+            return isc.config.ccsession.create_answer(0)
         else:
             logger.debug(DBG_STATHTTPD_MESSAGING,
                          STATHTTPD_RECEIVED_UNKNOWN_COMMAND, command)
             return isc.config.ccsession.create_answer(
                 1, "Unknown command: " + str(command))
 
-    def get_stats_data(self):
+    def get_stats_data(self, owner=None, name=None):
         """Requests statistics data to the Stats daemon and returns
-        the data which obtains from it"""
+        the data obtained from it. The first argument is the
+        module name which owns the statistics data, the second
+        argument is one name of the statistics items which the
+        module owns. The second argument cannot be specified when the
+        first argument is not specified. It returns the statistics
+        data of the specified module or item. When a session timeout
+        or a session error occurs, it raises
+        StatsHttpdError. When the stats daemon returns a non-zero
+        value, it raises StatsHttpdDataError."""
+        param = {}
+        if owner is None and name is None:
+            param = None
+        if owner is not None:
+            param['owner'] = owner
+        if name is not None:
+            param['name'] = name
         try:
             seq = self.cc_session.group_sendmsg(
-                isc.config.ccsession.create_command('show'),
-                self.stats_module_name)
+                isc.config.ccsession.create_command('show', param), 'Stats')
             (answer, env) = self.cc_session.group_recvmsg(False, seq)
             if answer:
                 (rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -354,122 +394,424 @@ class StatsHttpd:
             if rcode == 0:
                 return value
             else:
-                raise StatsHttpdError("Stats module: %s" % str(value))
-
-    def get_stats_spec(self):
-        """Just returns spec data"""
-        return self.stats_config_spec
-
-    def load_templates(self):
-        """Setup the bodies of XSD and XSL documents to be responds to
-        HTTP clients. Before that it also creates XML tag structures by
-        using xml.etree.ElementTree.Element class and substitutes
-        concrete strings with parameters embed in the string.Template
-        object."""
+                raise StatsHttpdDataError("Stats module: %s" % str(value))
+
+    def get_stats_spec(self, owner=None, name=None):
+        """Requests the statistics specification from the Stats
+        daemon and returns the data obtained from it. The first
+        argument is the module name which owns the statistics data,
+        the second argument is one name of the statistics items which
+        the module owns. The second argument cannot be specified when
+        the first argument is not specified. It returns the statistics
+        specification of the specified module or item. When a session
+        timeout or a session error occurs, it raises
+        StatsHttpdError. When the stats daemon returns a non-zero
+        value, it raises StatsHttpdDataError."""
+        param = {}
+        if owner is None and name is None:
+            param = None
+        if owner is not None:
+            param['owner'] = owner
+        if name is not None:
+            param['name'] = name
+        try:
+            seq = self.cc_session.group_sendmsg(
+                isc.config.ccsession.create_command('showschema', param), 'Stats')
+            (answer, env) = self.cc_session.group_recvmsg(False, seq)
+            if answer:
+                (rcode, value) = isc.config.ccsession.parse_answer(answer)
+                if rcode == 0:
+                    return value
+                else:
+                    raise StatsHttpdDataError("Stats module: %s" % str(value))
+        except (isc.cc.session.SessionTimeout,
+                isc.cc.session.SessionError) as err:
+            raise StatsHttpdError("%s: %s" %
+                                  (err.__class__.__name__, err))
+
+
+    def xml_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics data and specification by
+        using the functions get_stats_data and get_stats_spec
+        respectively and loads the XML template file and returns the
+        string of the XML document. The first argument is the module
+        name which owns the statistics data, the second argument is
+        one name of the statistics items which the module
+        owns. The second argument cannot be specified when the first
+        argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_data2xml(stats_spec, stats_data, xml_elem):
+            """Internal use for xml_handler. Reads stats_data and
+            stats_spec specified as first and second arguments, and
+            modify the xml object specified as third
+            argument. xml_elem must be modified and always returns
+            None."""
+            # assumed started with module_spec or started with
+            # item_spec in statistics
+            if type(stats_spec) is dict:
+                # assumed started with module_spec
+                if 'item_name' not in stats_spec \
+                        and 'item_type' not in stats_spec:
+                    for module_name in stats_spec.keys():
+                        elem = xml.etree.ElementTree.Element(module_name)
+                        stats_data2xml(stats_spec[module_name],
+                                       stats_data[module_name], elem)
+                        xml_elem.append(elem)
+                # started with item_spec in statistics
+                else:
+                    elem = xml.etree.ElementTree.Element(stats_spec['item_name'])
+                    if stats_spec['item_type'] == 'map':
+                        stats_data2xml(stats_spec['map_item_spec'],
+                                       stats_data,
+                                       elem)
+                    elif stats_spec['item_type'] == 'list':
+                        for item in stats_data:
+                            stats_data2xml(stats_spec['list_item_spec'],
+                                           item, elem)
+                    else:
+                        elem.text = str(stats_data)
+                    xml_elem.append(elem)
+            # assumed started with stats_spec
+            elif type(stats_spec) is list:
+                for item_spec in stats_spec:
+                    stats_data2xml(item_spec,
+                                   stats_data[item_spec['item_name']],
+                                   xml_elem)
+
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        stats_data = self.get_stats_data(module_name, item_name)
+        # make the path xxx/module/item if specified respectively
+        path_info = ''
+        if module_name is not None and item_name is not None:
+            path_info = '/' + module_name + '/' + item_name
+        elif module_name is not None:
+            path_info = '/' + module_name
+        xml_elem = xml.etree.ElementTree.Element(
+            'bind10:statistics',
+            attrib={ 'xsi:schemaLocation' : XSD_NAMESPACE + ' ' + XSD_URL_PATH + path_info,
+                     'xmlns:bind10' : XSD_NAMESPACE,
+                     'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance" })
+        stats_data2xml(stats_spec, stats_data, xml_elem)
+        # The coding conversion is tricky. xml..tostring() of Python 3.2
+        # returns bytes (not string) regardless of the coding, while
+        # tostring() of Python 3.1 returns a string.  To support both
+        # cases transparently, we first make sure tostring() returns
+        # bytes by specifying utf-8 and then convert the result to a
+        # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
+        xml_string = str(xml.etree.ElementTree.tostring(xml_elem, encoding='utf-8'),
+                         encoding='us-ascii')
+        self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
+            xml_string=xml_string,
+            xsl_url_path=XSL_URL_PATH + path_info)
+        assert self.xml_body is not None
+        return self.xml_body
+
+    def xsd_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics specification by using
+        the function get_stats_spec and loads the XSD
+        template file and returns the string of the XSD document. The
+        first argument is the module name which owns the statistics
+        data, the second argument is one name of the statistics items
+        which the module owns. The second argument cannot be
+        specified when the first argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_spec2xsd(stats_spec, xsd_elem):
+            """Internal use for xsd_handler. Reads stats_spec
+            specified as first arguments, and modify the xml object
+            specified as second argument. xsd_elem must be
+            modified. Always returns None with no exceptions."""
+            # assumed module_spec or one stats_spec
+            if type(stats_spec) is dict:
+                # assumed module_spec
+                if 'item_name' not in stats_spec:
+                    for mod in stats_spec.keys():
+                        elem = xml.etree.ElementTree.Element(
+                            "element", { "name" : mod })
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        alltag = xml.etree.ElementTree.Element("all")
+                        stats_spec2xsd(stats_spec[mod], alltag)
+                        complextype.append(alltag)
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                # assumed stats_spec
+                else:
+                    if stats_spec['item_type'] == 'map':
+                        alltag = xml.etree.ElementTree.Element("all")
+                        stats_spec2xsd(stats_spec['map_item_spec'], alltag)
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        complextype.append(alltag)
+                        elem = xml.etree.ElementTree.Element(
+                            "element", attrib={ "name" : stats_spec["item_name"],
+                                                "minOccurs": "0" \
+                                                    if stats_spec["item_optional"] \
+                                                    else "1",
+                                                "maxOccurs": "unbounded" })
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                    elif stats_spec['item_type'] == 'list':
+                        alltag = xml.etree.ElementTree.Element("sequence")
+                        stats_spec2xsd(stats_spec['list_item_spec'], alltag)
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        complextype.append(alltag)
+                        elem = xml.etree.ElementTree.Element(
+                            "element", attrib={ "name" : stats_spec["item_name"],
+                                                "minOccurs": "0" \
+                                                    if stats_spec["item_optional"] \
+                                                    else "1",
+                                                "maxOccurs": "1" })
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                    else:
+                        # determine the datatype of XSD
+                        # TODO: Should consider other item_format types
+                        datatype = stats_spec["item_type"] \
+                            if stats_spec["item_type"].lower() != 'real' \
+                            else 'float'
+                        if "item_format" in stats_spec:
+                            item_format = stats_spec["item_format"]
+                            if datatype.lower() == 'string' \
+                                    and item_format.lower() == 'date-time':
+                                 datatype = 'dateTime'
+                            elif datatype.lower() == 'string' \
+                                    and (item_format.lower() == 'date' \
+                                             or item_format.lower() == 'time'):
+                                 datatype = item_format.lower()
+                        elem = xml.etree.ElementTree.Element(
+                            "element",
+                            attrib={
+                                'name' : stats_spec["item_name"],
+                                'type' : datatype,
+                                'minOccurs' : "0" \
+                                    if stats_spec["item_optional"] \
+                                    else "1",
+                                'maxOccurs' : "1"
+                                }
+                            )
+                        annotation = xml.etree.ElementTree.Element("annotation")
+                        appinfo = xml.etree.ElementTree.Element("appinfo")
+                        documentation = xml.etree.ElementTree.Element("documentation")
+                        if "item_title" in stats_spec:
+                            appinfo.text = stats_spec["item_title"]
+                        if "item_description" in stats_spec:
+                            documentation.text = stats_spec["item_description"]
+                        annotation.append(appinfo)
+                        annotation.append(documentation)
+                        elem.append(annotation)
+                        xsd_elem.append(elem)
+            # multiple stats_specs
+            elif type(stats_spec) is list:
+                for item_spec in stats_spec:
+                    stats_spec2xsd(item_spec, xsd_elem)
+
         # for XSD
-        xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
-        for item in self.get_stats_spec():
-            element = xml.etree.ElementTree.Element(
-                "element",
-                dict( name=item["item_name"],
-                      type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
-                      minOccurs="1",
-                      maxOccurs="1" ),
-                )
-            annotation = xml.etree.ElementTree.Element("annotation")
-            appinfo = xml.etree.ElementTree.Element("appinfo")
-            documentation = xml.etree.ElementTree.Element("documentation")
-            appinfo.text = item["item_title"]
-            documentation.text = item["item_description"]
-            annotation.append(appinfo)
-            annotation.append(documentation)
-            element.append(annotation)
-            xsd_root.append(element)
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        alltag = xml.etree.ElementTree.Element("all")
+        stats_spec2xsd(stats_spec, alltag)
+        complextype = xml.etree.ElementTree.Element("complexType")
+        complextype.append(alltag)
+        documentation = xml.etree.ElementTree.Element("documentation")
+        documentation.text = "A set of statistics data"
+        annotation = xml.etree.ElementTree.Element("annotation")
+        annotation.append(documentation)
+        elem = xml.etree.ElementTree.Element(
+            "element", attrib={ 'name' : 'statistics' })
+        elem.append(annotation)
+        elem.append(complextype)
+        documentation = xml.etree.ElementTree.Element("documentation")
+        documentation.text = "XML schema of the statistics data in BIND 10"
+        annotation = xml.etree.ElementTree.Element("annotation")
+        annotation.append(documentation)
+        xsd_root = xml.etree.ElementTree.Element(
+            "schema",
+            attrib={ 'xmlns' : "http://www.w3.org/2001/XMLSchema",
+                     'targetNamespace' : XSD_NAMESPACE,
+                     'xmlns:bind10' : XSD_NAMESPACE })
+        xsd_root.append(annotation)
+        xsd_root.append(elem)
         # The coding conversion is tricky. xml..tostring() of Python 3.2
         # returns bytes (not string) regardless of the coding, while
         # tostring() of Python 3.1 returns a string.  To support both
         # cases transparently, we first make sure tostring() returns
         # bytes by specifying utf-8 and then convert the result to a
         # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
         xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
                          encoding='us-ascii')
         self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
-            xsd_string=xsd_string,
-            xsd_namespace=XSD_NAMESPACE
-            )
+            xsd_string=xsd_string)
         assert self.xsd_body is not None
+        return self.xsd_body
+
+    def xsl_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics specification by using
+        the function get_stats_spec and loads the XSL
+        template file, then returns the string of the XSL document. The
+        first argument is the module name which owns the statistics
+        data; the second argument is the name of one of the statistics items
+        which the module owns. The second argument cannot be
+        specified when the first argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_spec2xsl(stats_spec, xsl_elem, path=XML_URL_PATH):
+            """Internal use for xsl_handler. Reads the stats_spec
+            given as the first argument, and modifies the xml object
+            given as the second argument; xsl_elem is
+            modified in place. The third argument is a base path used for
+            making anchor tags in XSL. Always returns None with no
+            exceptions."""
+            # assumed module_spec or one stats_spec
+            if type(stats_spec) is dict:
+                # assumed module_spec
+                if 'item_name' not in stats_spec:
+                    table = xml.etree.ElementTree.Element("table")
+                    tr = xml.etree.ElementTree.Element("tr")
+                    th = xml.etree.ElementTree.Element("th")
+                    th.text = "Module Name"
+                    tr.append(th)
+                    th = xml.etree.ElementTree.Element("th")
+                    th.text = "Module Item"
+                    tr.append(th)
+                    table.append(tr)
+                    for mod in stats_spec.keys():
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : mod })
+                        tr = xml.etree.ElementTree.Element("tr")
+                        td = xml.etree.ElementTree.Element("td")
+                        a = xml.etree.ElementTree.Element(
+                            "a", attrib={ "href": urllib.parse.quote(path + "/" + mod) })
+                        a.text = mod
+                        td.append(a)
+                        tr.append(td)
+                        td = xml.etree.ElementTree.Element("td")
+                        stats_spec2xsl(stats_spec[mod], td,
+                                       path + "/" + mod)
+                        tr.append(td)
+                        foreach.append(tr)
+                        table.append(foreach)
+                    xsl_elem.append(table)
+                # assumed stats_spec
+                else:
+                    if stats_spec['item_type'] == 'map':
+                        table = xml.etree.ElementTree.Element("table")
+                        tr = xml.etree.ElementTree.Element("tr")
+                        th = xml.etree.ElementTree.Element("th")
+                        th.text = "Item Name"
+                        tr.append(th)
+                        th = xml.etree.ElementTree.Element("th")
+                        th.text = "Item Value"
+                        tr.append(th)
+                        table.append(tr)
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : stats_spec['item_name'] })
+                        tr = xml.etree.ElementTree.Element("tr")
+                        td = xml.etree.ElementTree.Element(
+                            "td",
+                            attrib={ "class" : "title",
+                                     "title" : stats_spec["item_description"] \
+                                         if "item_description" in stats_spec \
+                                         else "" })
+                        # TODO: Consider whether we should always use
+                        # the identical name "item_name" for the
+                        # user-visible name in XSL.
+                        td.text = stats_spec[ "item_title" if "item_title" in stats_spec else "item_name" ]
+                        tr.append(td)
+                        td = xml.etree.ElementTree.Element("td")
+                        stats_spec2xsl(stats_spec['map_item_spec'], td,
+                                       path + "/" + stats_spec["item_name"])
+                        tr.append(td)
+                        foreach.append(tr)
+                        table.append(foreach)
+                        xsl_elem.append(table)
+                    elif stats_spec['item_type'] == 'list':
+                        stats_spec2xsl(stats_spec['list_item_spec'], xsl_elem,
+                                       path + "/" + stats_spec["item_name"])
+                    else:
+                        xsl_valueof = xml.etree.ElementTree.Element(
+                            "xsl:value-of",
+                            attrib={'select': stats_spec["item_name"]})
+                        xsl_elem.append(xsl_valueof)
+
+            # multiple stats_specs
+            elif type(stats_spec) is list:
+                table = xml.etree.ElementTree.Element("table")
+                tr = xml.etree.ElementTree.Element("tr")
+                th = xml.etree.ElementTree.Element("th")
+                th.text = "Item Name"
+                tr.append(th)
+                th = xml.etree.ElementTree.Element("th")
+                th.text = "Item Value"
+                tr.append(th)
+                table.append(tr)
+                for item_spec in stats_spec:
+                    tr = xml.etree.ElementTree.Element("tr")
+                    td = xml.etree.ElementTree.Element(
+                        "td",
+                        attrib={ "class" : "title",
+                                 "title" : item_spec["item_description"] \
+                                     if "item_description" in item_spec \
+                                     else "" })
+                    # if the path length is equal to or shorter than
+                    # XML_URL_PATH + /Module/Item, add the anchor tag.
+                    if len(path.split('/')) <= len((XML_URL_PATH + '/Module/Item').split('/')):
+                        a = xml.etree.ElementTree.Element(
+                            "a", attrib={ "href": urllib.parse.quote(path + "/" + item_spec["item_name"]) })
+                        a.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+                        td.append(a)
+                    else:
+                        td.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+                    tr.append(td)
+                    td = xml.etree.ElementTree.Element("td")
+                    stats_spec2xsl(item_spec, td, path)
+                    tr.append(td)
+                    if item_spec['item_type'] == 'list':
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : item_spec['item_name'] })
+                        foreach.append(tr)
+                        table.append(foreach)
+                    else:
+                        table.append(tr)
+                xsl_elem.append(table)
 
         # for XSL
-        xsd_root = xml.etree.ElementTree.Element(
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        xsd_root = xml.etree.ElementTree.Element( # started with xml:template tag
             "xsl:template",
-            dict(match="*")) # started with xml:template tag
-        for item in self.get_stats_spec():
-            tr = xml.etree.ElementTree.Element("tr")
-            td1 = xml.etree.ElementTree.Element(
-                "td", { "class" : "title",
-                        "title" : item["item_description"] })
-            td1.text = item["item_title"]
-            td2 = xml.etree.ElementTree.Element("td")
-            xsl_valueof = xml.etree.ElementTree.Element(
-                "xsl:value-of",
-                dict(select=item["item_name"]))
-            td2.append(xsl_valueof)
-            tr.append(td1)
-            tr.append(td2)
-            xsd_root.append(tr)
+            attrib={'match': "bind10:statistics"})
+        stats_spec2xsl(stats_spec, xsd_root)
         # The coding conversion is tricky. xml..tostring() of Python 3.2
         # returns bytes (not string) regardless of the coding, while
         # tostring() of Python 3.1 returns a string.  To support both
         # cases transparently, we first make sure tostring() returns
         # bytes by specifying utf-8 and then convert the result to a
         # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
         xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
                          encoding='us-ascii')
         self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
             xsl_string=xsl_string,
             xsd_namespace=XSD_NAMESPACE)
         assert self.xsl_body is not None
-
-    def xml_handler(self):
-        """Handler which requests to Stats daemon to obtain statistics
-        data and returns the body of XML document"""
-        xml_list=[]
-        for (k, v) in self.get_stats_data().items():
-            (k, v) = (str(k), str(v))
-            elem = xml.etree.ElementTree.Element(k)
-            elem.text = v
-            # The coding conversion is tricky. xml..tostring() of Python 3.2
-            # returns bytes (not string) regardless of the coding, while
-            # tostring() of Python 3.1 returns a string.  To support both
-            # cases transparently, we first make sure tostring() returns
-            # bytes by specifying utf-8 and then convert the result to a
-            # plain string (code below assume it).
-            xml_list.append(
-                str(xml.etree.ElementTree.tostring(elem, encoding='utf-8'),
-                    encoding='us-ascii'))
-        xml_string = "".join(xml_list)
-        self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
-            xml_string=xml_string,
-            xsd_namespace=XSD_NAMESPACE,
-            xsd_url_path=XSD_URL_PATH,
-            xsl_url_path=XSL_URL_PATH)
-        assert self.xml_body is not None
-        return self.xml_body
-
-    def xsd_handler(self):
-        """Handler which just returns the body of XSD document"""
-        return self.xsd_body
-
-    def xsl_handler(self):
-        """Handler which just returns the body of XSL document"""
         return self.xsl_body
 
     def open_template(self, file_name):
         """It opens a template file, and it loads all lines to a
         string variable and returns string. Template object includes
         the variable. Limitation of a file size isn't needed there."""
-        lines = "".join(
-            open(file_name, 'r').readlines())
+        f = open(file_name, 'r')
+        lines = "".join(f.readlines())
+        f.close()
         assert lines is not None
         return string.Template(lines)
 
@@ -491,7 +833,7 @@ if __name__ == "__main__":
         logger.fatal(STATHTTPD_CC_SESSION_ERROR, se)
         sys.exit(1)
     except HttpServerError as hse:
-        logger.fatal(STATHTTPD_START_SERVER_ERROR, hse)
+        logger.fatal(STATHTTPD_START_SERVER_INIT_ERROR, hse)
         sys.exit(1)
     except KeyboardInterrupt as kie:
         logger.info(STATHTTPD_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index d0f7e2c..dbd0650 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -49,14 +49,20 @@ An unknown command has been sent to the stats-httpd module. The
 stats-httpd module will respond with an error, and the command will
 be ignored.
 
-% STATHTTPD_SERVER_ERROR http server error: %1
-An internal error occurred while handling an http request. A HTTP 500
+% STATHTTPD_SERVER_ERROR HTTP server error: %1
+An internal error occurred while handling an HTTP request. An HTTP 500
 response will be sent back, and the specific error is printed. This
 is an error condition that likely points to a module that is not
 responding correctly to statistic requests.
 
-% STATHTTPD_SERVER_INIT_ERROR http server initialization error: %1
-There was a problem initializing the http server in the stats-httpd
+% STATHTTPD_SERVER_DATAERROR HTTP server data error: %1
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the specified data
+corresponding to the requested URI is incorrect.
+
+% STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
 module upon receiving its configuration data. The most likely cause
 is a port binding problem or a bad configuration value. The specific
 error is printed in the message. The new configuration is ignored,
@@ -65,8 +71,8 @@ and an error is sent back.
 % STATHTTPD_SHUTDOWN shutting down
 The stats-httpd daemon is shutting down.
 
-% STATHTTPD_START_SERVER_INIT_ERROR http server initialization error: %1
-There was a problem initializing the http server in the stats-httpd
+% STATHTTPD_START_SERVER_INIT_ERROR HTTP server initialization error: %1
+There was a problem initializing the HTTP server in the stats-httpd
 module upon startup. The most likely cause is that it was not able
 to bind to the listening port. The specific error is printed, and the
 module will shut down.
diff --git a/src/bin/stats/stats_messages.mes b/src/bin/stats/stats_messages.mes
index 9ad07cf..cfffb3a 100644
--- a/src/bin/stats/stats_messages.mes
+++ b/src/bin/stats/stats_messages.mes
@@ -28,16 +28,6 @@ control bus. A likely problem is that the message bus daemon
 This debug message is printed when the stats module has received a
 configuration update from the configuration manager.
 
-% STATS_RECEIVED_REMOVE_COMMAND received command to remove %1
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-
-% STATS_RECEIVED_RESET_COMMAND received command to reset all statistics
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
-
 % STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics
 The stats module received a command to show all statistics that it has
 collected.
@@ -72,4 +62,15 @@ installation problem, where the specification file stats.spec is
 from a different version of BIND 10 than the stats module itself.
 Please check your installation.
 
+% STATS_STARTING starting
+The stats module is now starting.
+
+% STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema
+The stats module received a command to show all statistics schemas of all modules.
+
+% STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1
+The stats module received a command to show the specified statistics schema of the specified module.
 
+% STATS_START_ERROR stats module error: %1
+An internal error occurred while starting the stats module. The stats
+module will now be shutting down.
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index dad6c48..01254d4 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,28 +1,29 @@
-SUBDIRS = isc http testdata
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
-EXTRA_DIST = $(PYTESTS) fake_time.py fake_socket.py fake_select.py
-CLEANFILES = fake_time.pyc fake_socket.pyc fake_select.pyc
+EXTRA_DIST = $(PYTESTS) test_utils.py
+CLEANFILES = test_utils.pyc
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
-	touch $(abs_top_srcdir)/.coverage 
+	touch $(abs_top_srcdir)/.coverage
 	rm -f .coverage
 	${LN_S} $(abs_top_srcdir)/.coverage .coverage
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/stats:$(abs_top_builddir)/src/bin/stats/tests:$(abs_top_builddir)/src/bin/msgq:$(abs_top_builddir)/src/lib/python/isc/config \
 	B10_FROM_SOURCE=$(abs_top_srcdir) \
+	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+	CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
 
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index 6d72dc2..b6847bd 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -13,147 +13,590 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats http server in a
+close to real environment.
+"""
+
 import unittest
 import os
-import http.server
-import string
-import fake_select
 import imp
-import sys
-import fake_socket
-
-import isc.cc
+import socket
+import errno
+import select
+import string
+import time
+import threading
+import http.client
+import xml.etree.ElementTree
+import random
 
+import isc
 import stats_httpd
-stats_httpd.socket = fake_socket
-stats_httpd.select = fake_select
+import stats
+from test_utils import BaseModules, ThreadingServerManager, MyStats, MyStatsHttpd, SignalHandler, send_command, send_shutdown
 
 DUMMY_DATA = {
-    "auth.queries.tcp": 10000,
-    "auth.queries.udp": 12000,
-    "bind10.boot_time": "2011-03-04T11:59:05Z",
-    "report_time": "2011-03-04T11:59:19Z",
-    "stats.boot_time": "2011-03-04T11:59:06Z",
-    "stats.last_update_time": "2011-03-04T11:59:07Z",
-    "stats.lname": "4d70d40a_c at host",
-    "stats.start_time": "2011-03-04T11:59:06Z",
-    "stats.timestamp": 1299239959.560846
+    'Boss' : {
+        "boot_time": "2011-03-04T11:59:06Z"
+        },
+    'Auth' : {
+        "queries.tcp": 2,
+        "queries.udp": 3,
+        "queries.perzone": [{
+                "zonename": "test.example",
+                "queries.tcp": 2,
+                "queries.udp": 3
+                }]
+        },
+    'Stats' : {
+        "report_time": "2011-03-04T11:59:19Z",
+        "boot_time": "2011-03-04T11:59:06Z",
+        "last_update_time": "2011-03-04T11:59:07Z",
+        "lname": "4d70d40a_c at host",
+        "timestamp": 1299239959.560846
+        }
     }
 
-def push_answer(stats_httpd):
-    stats_httpd.cc_session.group_sendmsg(
-        { 'result': 
-          [ 0, DUMMY_DATA ] }, "Stats")
+def get_availaddr(address='127.0.0.1', port=8001):
+    """returns a tuple of address and port which is available to
+    listen on the platform. The first argument is an address to
+    search from. The second argument is a port to search from. If an
+    address and port combination is found to be unavailable, the
+    port number is increased and the next combination is tried, until
+    an available address and port pair is found. If the port number
+    reaches over 65535, the search may stop and raise an
+    OverflowError exception."""
+    while True:
+        for addr in socket.getaddrinfo(
+            address, port, 0,
+            socket.SOCK_STREAM, socket.IPPROTO_TCP):
+            sock = socket.socket(addr[0], socket.SOCK_STREAM)
+            try:
+                sock.bind((address, port))
+                return (address, port)
+            except socket.error:
+                continue
+            finally:
+                if sock: sock.close()
+        # This address and port number are already in use.
+        # next port number is added
+        port = port + 1
 
-def pull_query(stats_httpd):
-    (msg, env) = stats_httpd.cc_session.group_recvmsg()
-    if 'result' in msg:
-        (ret, arg) = isc.config.ccsession.parse_answer(msg)
-    else:
-        (ret, arg) = isc.config.ccsession.parse_command(msg)
-    return (ret, arg, env)
+def is_ipv6_enabled(address='::1', port=8001):
+    """checks whether IPv6 is enabled on the platform. The address used
+    for the check is '::1' and the port is a random number between 8001 and
+    65535. The check is retried up to 3 times on failure. The built-in socket
+    module provides a 'has_ipv6' parameter, but it's not used here
+    because there may be a situation where the value is True on an
+    environment where the IPv6 config is disabled."""
+    for p in random.sample(range(port, 65535), 3):
+        try:
+            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
+            sock.bind((address, p))
+            return True
+        except socket.error:
+            continue
+        finally:
+            if sock: sock.close()
+    return False
 
 class TestHttpHandler(unittest.TestCase):
     """Tests for HttpHandler class"""
-
     def setUp(self):
-        self.stats_httpd = stats_httpd.StatsHttpd()
-        self.assertTrue(type(self.stats_httpd.httpd) is list)
-        self.httpd = self.stats_httpd.httpd
+        # set the signal handler for deadlock
+        self.sig_handler = SignalHandler(self.fail)
+        self.base = BaseModules()
+        self.stats_server = ThreadingServerManager(MyStats)
+        self.stats = self.stats_server.server
+        self.stats_server.run()
+        (self.address, self.port) = get_availaddr()
+        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, (self.address, self.port))
+        self.stats_httpd = self.stats_httpd_server.server
+        self.stats_httpd_server.run()
+        self.client = http.client.HTTPConnection(self.address, self.port)
+        self.client._http_vsn_str = 'HTTP/1.0\n'
+        self.client.connect()
+
+    def tearDown(self):
+        self.client.close()
+        self.stats_httpd_server.shutdown()
+        self.stats_server.shutdown()
+        self.base.shutdown()
+        # reset the signal handler
+        self.sig_handler.reset()
 
     def test_do_GET(self):
-        for ht in self.httpd:
-            self._test_do_GET(ht._handler)
+        self.assertTrue(type(self.stats_httpd.httpd) is list)
+        self.assertEqual(len(self.stats_httpd.httpd), 1)
+        self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
 
-    def _test_do_GET(self, handler):
+        def check_XML_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XML_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            xml_doctype = response.readline().decode()
+            xsl_doctype = response.readline().decode()
+            self.assertTrue(len(xml_doctype) > 0)
+            self.assertTrue(len(xsl_doctype) > 0)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            self.assertTrue(root.tag.find('statistics') > 0)
+            schema_loc = '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'
+            if item is None and mod is None:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith(
+                        '<?xml-stylesheet type="text/xsl" href="' + 
+                        stats_httpd.XSL_URL_PATH
+                        + '"?>'))
+                for m in DUMMY_DATA:
+                    for k in DUMMY_DATA[m].keys():
+                        self.assertIsNotNone(root.find(m + '/' + k))
+                        itm = root.find(m + '/' + k)
+                        if type(DUMMY_DATA[m][k]) is list:
+                            for v in DUMMY_DATA[m][k]:
+                                for i in v:
+                                    self.assertIsNotNone(itm.find('zones/' + i))
+            elif item is None:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH + '/' + mod)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith( 
+                                 '<?xml-stylesheet type="text/xsl" href="'
+                                 + stats_httpd.XSL_URL_PATH + '/' + mod
+                                 + '"?>'))
+                for k in DUMMY_DATA[mod].keys():
+                    self.assertIsNotNone(root.find(mod + '/' + k))
+                    itm = root.find(mod + '/' + k)
+                    self.assertIsNotNone(itm)
+                    if type(DUMMY_DATA[mod][k]) is list:
+                        for v in DUMMY_DATA[mod][k]:
+                            for i in v:
+                                self.assertIsNotNone(itm.find('zones/' + i))
+            else:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH + '/' + mod + '/' + item)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith( 
+                                 '<?xml-stylesheet type="text/xsl" href="'
+                                 + stats_httpd.XSL_URL_PATH + '/' + mod + '/' + item
+                                 + '"?>'))
+                self.assertIsNotNone(root.find(mod + '/' + item))
 
         # URL is '/bind10/statistics/xml'
-        handler.path = stats_httpd.XML_URL_PATH
-        push_answer(self.stats_httpd)
-        handler.do_GET()
-        (ret, arg, env) = pull_query(self.stats_httpd)
-        self.assertEqual(ret, "show")
-        self.assertIsNone(arg)
-        self.assertTrue('group' in env)
-        self.assertEqual(env['group'], 'Stats')
-        self.assertEqual(handler.response.code, 200)
-        self.assertEqual(handler.response.headers["Content-type"], "text/xml")
-        self.assertTrue(handler.response.headers["Content-Length"] > 0)
-        self.assertTrue(handler.response.wrote_headers)
-        self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
-        self.assertTrue(handler.response.body.find(stats_httpd.XSD_URL_PATH)>0)
-        for (k, v) in DUMMY_DATA.items():
-            self.assertTrue(handler.response.body.find(str(k))>0)
-            self.assertTrue(handler.response.body.find(str(v))>0)
-
-        # URL is '/bind10/statitics/xsd'
-        handler.path = stats_httpd.XSD_URL_PATH
-        handler.do_GET()
-        self.assertEqual(handler.response.code, 200)
-        self.assertEqual(handler.response.headers["Content-type"], "text/xml")
-        self.assertTrue(handler.response.headers["Content-Length"] > 0)
-        self.assertTrue(handler.response.wrote_headers)
-        self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
-        for (k, v) in DUMMY_DATA.items():
-            self.assertTrue(handler.response.body.find(str(k))>0)
-
-        # URL is '/bind10/statitics/xsl'
-        handler.path = stats_httpd.XSL_URL_PATH
-        handler.do_GET()
-        self.assertEqual(handler.response.code, 200)
-        self.assertEqual(handler.response.headers["Content-type"], "text/xml")
-        self.assertTrue(handler.response.headers["Content-Length"] > 0)
-        self.assertTrue(handler.response.wrote_headers)
-        self.assertTrue(handler.response.body.find(stats_httpd.XSD_NAMESPACE)>0)
-        for (k, v) in DUMMY_DATA.items():
-            self.assertTrue(handler.response.body.find(str(k))>0)
+        check_XML_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xml/Module'
+            check_XML_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xml/Module/Item'
+                check_XML_URL_PATH(mod=m, item=k)
+
+        def check_XSD_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XSD_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+            self.assertTrue(root.tag.find('schema') > 0)
+            self.assertTrue(hasattr(root, 'attrib'))
+            self.assertTrue('targetNamespace' in root.attrib)
+            self.assertEqual(root.attrib['targetNamespace'],
+                             stats_httpd.XSD_NAMESPACE)
+            if mod is None and item is None:
+                for (mod, itm) in DUMMY_DATA.items():
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                    mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                    self.assertTrue(mod in mod_elm)
+                    for (it, val) in itm.items():
+                        xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                        itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                        self.assertTrue(it in itm_elm)
+                        if type(val) is list:
+                            xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                            itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+                            self.assertTrue('zones' in itm_elm2)
+                            for i in val:
+                                for k in i.keys():
+                                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                                    self.assertTrue(
+                                        k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+            elif item is None:
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                self.assertTrue(mod in mod_elm)
+                for (it, val) in DUMMY_DATA[mod].items():
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                    itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                    self.assertTrue(it in itm_elm)
+                    if type(val) is list:
+                        xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                        itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+                        self.assertTrue('zones' in itm_elm2)
+                        for i in val:
+                            for k in i.keys():
+                                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                                self.assertTrue(
+                                    k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+            else:
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                self.assertTrue(mod in mod_elm)
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                self.assertTrue(item in itm_elm)
+                if type(DUMMY_DATA[mod][item]) is list:
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                    itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[item].findall(xsdpath) ])
+                    self.assertTrue('zones' in itm_elm2)
+                    for i in DUMMY_DATA[mod][item]:
+                        for k in i.keys():
+                            xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                            self.assertTrue(
+                                k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+
+        # URL is '/bind10/statistics/xsd'
+        check_XSD_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xsd/Module'
+            check_XSD_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xsd/Module/Item'
+                check_XSD_URL_PATH(mod=m, item=k)
+
+        def check_XSL_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XSL_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+            url_xhtml = '{http://www.w3.org/1999/xhtml}'
+            self.assertEqual(root.tag, url_trans + 'stylesheet')
+            if item is None and mod is None:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                for (mod, itms) in DUMMY_DATA.items():
+                    self.assertTrue(mod in mod_fe)
+                    for (k, v) in itms.items():
+                        if type(v) is list:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_trans + 'for-each'
+                            itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                            self.assertTrue(k in itm_fe)
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'a'
+                            itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+                            self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+                            for itms in v:
+                                xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                    + url_xhtml + 'table/' + url_trans + 'for-each'
+                                itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+                                self.assertTrue('zones' in itm_fe)
+                                for (k, v) in itms.items():
+                                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                        + url_xhtml + 'td/' + url_trans + 'value-of'
+                                    itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                                    self.assertTrue(k in itm_vo)
+                        else:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_trans + 'value-of'
+                            itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                            self.assertTrue(k in itm_vo)
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_xhtml + 'a'
+                            itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                            self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+            elif item is None:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                self.assertTrue(mod in mod_fe)
+                for (k, v) in DUMMY_DATA[mod].items():
+                    if type(v) is list:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_trans + 'for-each'
+                        itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                        self.assertTrue(k in itm_fe)
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'a'
+                        itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+                        self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+                        for itms in v:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_trans + 'for-each'
+                            itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+                            self.assertTrue('zones' in itm_fe)
+                            for (k, v) in itms.items():
+                                xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                    + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                    + url_xhtml + 'td/' + url_trans + 'value-of'
+                                itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                                self.assertTrue(k in itm_vo)
+                    else:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                            + url_xhtml + 'td/' + url_trans + 'value-of'
+                        itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                        self.assertTrue(k in itm_vo)
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                            + url_xhtml + 'td/' + url_xhtml + 'a'
+                        itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                        self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+            else:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                self.assertTrue(mod in mod_fe)
+                if type(DUMMY_DATA[mod][item]) is list:
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_trans + 'for-each'
+                    itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                    self.assertTrue(item in itm_fe)
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'a'
+                    itm_a = [ x.attrib['href'] for x in itm_fe[item].findall(xslpath) ]
+                    self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+                    for itms in DUMMY_DATA[mod][item]:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_trans + 'for-each'
+                        itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[item].findall(xslpath) ])
+                        self.assertTrue('zones' in itm_fe)
+                        for (k, v) in itms.items():
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_trans + 'value-of'
+                            itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                            self.assertTrue(k in itm_vo)
+                else:
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                        + url_xhtml + 'td/' + url_trans + 'value-of'
+                    itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                    self.assertTrue(item in itm_vo)
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                        + url_xhtml + 'td/' + url_xhtml + 'a'
+                    itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                    self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+
+        # URL is '/bind10/statistics/xsl'
+        check_XSL_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xsl/Module'
+            check_XSL_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xsl/Module/Item'
+                check_XSL_URL_PATH(mod=m, item=k)
 
         # 302 redirect
-        handler.path = '/'
-        handler.headers = {'Host': 'my.host.domain'}
-        handler.do_GET()
-        self.assertEqual(handler.response.code, 302)
-        self.assertEqual(handler.response.headers["Location"],
-                         "http://my.host.domain%s" % stats_httpd.XML_URL_PATH)
-
-        # 404 NotFound
-        handler.path = '/path/to/foo/bar'
-        handler.headers = {}
-        handler.do_GET()
-        self.assertEqual(handler.response.code, 404)
-
-        # failure case(connection with Stats is down)
-        handler.path = stats_httpd.XML_URL_PATH
-        push_answer(self.stats_httpd)
-        self.assertFalse(self.stats_httpd.cc_session._socket._closed)
-        self.stats_httpd.cc_session._socket._closed = True
-        handler.do_GET()
-        self.stats_httpd.cc_session._socket._closed = False
-        self.assertEqual(handler.response.code, 500)
-        self.stats_httpd.cc_session._clear_queues()
-
-        # failure case(Stats module returns err)
-        handler.path = stats_httpd.XML_URL_PATH
-        self.stats_httpd.cc_session.group_sendmsg(
-            { 'result': [ 1, "I have an error." ] }, "Stats")
-        self.assertFalse(self.stats_httpd.cc_session._socket._closed)
-        self.stats_httpd.cc_session._socket._closed = False
-        handler.do_GET()
-        self.assertEqual(handler.response.code, 500)
-        self.stats_httpd.cc_session._clear_queues()
+        self.client._http_vsn_str = 'HTTP/1.1'
+        self.client.putrequest('GET', '/')
+        self.client.putheader('Host', self.address)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 302)
+        self.assertEqual(response.getheader('Location'),
+                         "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
+
+        # 404 NotFound (random path)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', '/path/to/foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', '/bind10/foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', '/bind10/statistics/foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + 'Auth') # with no slash
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 200 ok
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '#foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '?foo=bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+
+        # 404 NotFound (too long path)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 404 NotFound (nonexistent module name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 404 NotFound (nonexistent item name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 404 NotFound (existent module but nonexistent item name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+    def test_do_GET_failed1(self):
+        # checks status
+        self.assertEqual(send_command("status", "Stats"),
+                         (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+        # failure case(Stats is down)
+        self.assertTrue(self.stats.running)
+        self.assertEqual(send_shutdown("Stats"), (0, None)) # Stats is down
+        self.assertFalse(self.stats.running)
+        self.stats_httpd.cc_session.set_timeout(milliseconds=100)
+
+        # request XML
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 500)
+
+        # request XSD
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 500)
+
+        # request XSL
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 500)
+
+    def test_do_GET_failed2(self):
+        # failure case(Stats replies an error)
+        self.stats.mccs.set_command_handler(
+            lambda cmd, args: \
+                isc.config.ccsession.create_answer(1, "specified arguments are incorrect: I have an error.")
+            )
+
+        # request XML
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # request XSD
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # request XSL
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
 
     def test_do_HEAD(self):
-        for ht in self.httpd:
-            self._test_do_HEAD(ht._handler)
+        self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
 
-    def _test_do_HEAD(self, handler):
-        handler.path = '/path/to/foo/bar'
-        handler.do_HEAD()
-        self.assertEqual(handler.response.code, 404)
+        self.client.putrequest('HEAD', '/path/to/foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
 
 class TestHttpServerError(unittest.TestCase):
     """Tests for HttpServerError exception"""
-
     def test_raises(self):
         try:
             raise stats_httpd.HttpServerError('Nothing')
@@ -162,173 +605,223 @@ class TestHttpServerError(unittest.TestCase):
 
 class TestHttpServer(unittest.TestCase):
     """Tests for HttpServer class"""
+    def setUp(self):
+        # set the signal handler for deadlock
+        self.sig_handler = SignalHandler(self.fail)
+        self.base = BaseModules()
+
+    def tearDown(self):
+        if hasattr(self, "stats_httpd"):
+            self.stats_httpd.stop()
+        self.base.shutdown()
+        # reset the signal handler
+        self.sig_handler.reset()
 
     def test_httpserver(self):
-        self.stats_httpd = stats_httpd.StatsHttpd()
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(ht.server_address in self.stats_httpd.http_addrs)
-            self.assertEqual(ht.xml_handler, self.stats_httpd.xml_handler)
-            self.assertEqual(ht.xsd_handler, self.stats_httpd.xsd_handler)
-            self.assertEqual(ht.xsl_handler, self.stats_httpd.xsl_handler)
-            self.assertEqual(ht.log_writer, self.stats_httpd.write_log)
-            self.assertTrue(isinstance(ht._handler, stats_httpd.HttpHandler))
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.assertEqual(type(self.stats_httpd.httpd), list)
+        self.assertEqual(len(self.stats_httpd.httpd), 1)
+        for httpd in self.stats_httpd.httpd:
+            self.assertTrue(isinstance(httpd, stats_httpd.HttpServer))
 
 class TestStatsHttpdError(unittest.TestCase):
     """Tests for StatsHttpdError exception"""
 
-    def test_raises(self):
+    def test_raises1(self):
         try:
             raise stats_httpd.StatsHttpdError('Nothing')
         except stats_httpd.StatsHttpdError as err:
             self.assertEqual(str(err), 'Nothing')
 
+    def test_raises2(self):
+        try:
+            raise stats_httpd.StatsHttpdDataError('Nothing')
+        except stats_httpd.StatsHttpdDataError as err:
+            self.assertEqual(str(err), 'Nothing')
+
 class TestStatsHttpd(unittest.TestCase):
     """Tests for StatsHttpd class"""
 
     def setUp(self):
-        fake_socket._CLOSED = False
-        fake_socket.has_ipv6 = True
-        self.stats_httpd = stats_httpd.StatsHttpd()
+        # set the signal handler for deadlock
+        self.sig_handler = SignalHandler(self.fail)
+        self.base = BaseModules()
+        self.stats_server = ThreadingServerManager(MyStats)
+        self.stats_server.run()
+        # checking IPv6 enabled on this platform
+        self.ipv6_enabled = is_ipv6_enabled()
 
     def tearDown(self):
-        self.stats_httpd.stop()
+        if hasattr(self, "stats_httpd"):
+            self.stats_httpd.stop()
+        self.stats_server.shutdown()
+        self.base.shutdown()
+        # reset the signal handler
+        self.sig_handler.reset()
 
     def test_init(self):
-        self.assertFalse(self.stats_httpd.mccs.get_socket()._closed)
-        self.assertEqual(self.stats_httpd.mccs.get_socket().fileno(),
-                         id(self.stats_httpd.mccs.get_socket()))
-        for ht in self.stats_httpd.httpd:
-            self.assertFalse(ht.socket._closed)
-            self.assertEqual(ht.socket.fileno(), id(ht.socket))
-        fake_socket._CLOSED = True
-        self.assertRaises(isc.cc.session.SessionError,
-                          stats_httpd.StatsHttpd)
-        fake_socket._CLOSED = False
+        server_address = get_availaddr()
+        self.stats_httpd = MyStatsHttpd(server_address)
+        self.assertEqual(self.stats_httpd.running, False)
+        self.assertEqual(self.stats_httpd.poll_intval, 0.5)
+        self.assertNotEqual(len(self.stats_httpd.httpd), 0)
+        self.assertEqual(type(self.stats_httpd.mccs), isc.config.ModuleCCSession)
+        self.assertEqual(type(self.stats_httpd.cc_session), isc.cc.Session)
+        self.assertEqual(len(self.stats_httpd.config), 2)
+        self.assertTrue('listen_on' in self.stats_httpd.config)
+        self.assertEqual(len(self.stats_httpd.config['listen_on']), 1)
+        self.assertTrue('address' in self.stats_httpd.config['listen_on'][0])
+        self.assertTrue('port' in self.stats_httpd.config['listen_on'][0])
+        self.assertTrue(server_address in set(self.stats_httpd.http_addrs))
 
-    def test_mccs(self):
+    def test_openclose_mccs(self):
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.close_mccs()
+        self.assertEqual(self.stats_httpd.mccs, None)
         self.stats_httpd.open_mccs()
+        self.assertIsNotNone(self.stats_httpd.mccs)
+        self.stats_httpd.mccs = None
+        self.assertEqual(self.stats_httpd.mccs, None)
+        self.assertEqual(self.stats_httpd.close_mccs(), None)
+
+    def test_mccs(self):
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.assertIsNotNone(self.stats_httpd.mccs.get_socket())
         self.assertTrue(
-            isinstance(self.stats_httpd.mccs.get_socket(), fake_socket.socket))
+            isinstance(self.stats_httpd.mccs.get_socket(), socket.socket))
         self.assertTrue(
             isinstance(self.stats_httpd.cc_session, isc.cc.session.Session))
-        self.assertTrue(
-            isinstance(self.stats_httpd.stats_module_spec, isc.config.ModuleSpec))
-        for cfg in self.stats_httpd.stats_config_spec:
-            self.assertTrue('item_name' in cfg)
-            self.assertTrue(cfg['item_name'] in DUMMY_DATA)
-        self.assertTrue(len(self.stats_httpd.stats_config_spec), len(DUMMY_DATA))
-
-    def test_load_config(self):
-        self.stats_httpd.load_config()
-        self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
+        statistics_spec = self.stats_httpd.get_stats_spec()
+        for mod in DUMMY_DATA:
+            self.assertTrue(mod in statistics_spec)
+            for cfg in statistics_spec[mod]:
+                self.assertTrue('item_name' in cfg)
+                self.assertTrue(cfg['item_name'] in DUMMY_DATA[mod])
+            self.assertTrue(len(statistics_spec[mod]), len(DUMMY_DATA[mod]))
+        self.stats_httpd.close_mccs()
+        self.assertIsNone(self.stats_httpd.mccs)
 
     def test_httpd(self):
         # dual stack (addresses is ipv4 and ipv6)
-        fake_socket.has_ipv6 = True
-        self.assertTrue(('127.0.0.1', 8000) in set(self.stats_httpd.http_addrs))
-        self.stats_httpd.http_addrs = [ ('::1', 8000), ('127.0.0.1', 8000) ]
-        self.assertTrue(
-            stats_httpd.HttpServer.address_family in set([fake_socket.AF_INET, fake_socket.AF_INET6]))
-        self.stats_httpd.open_httpd()
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
+        if self.ipv6_enabled:
+            server_addresses = (get_availaddr('::1'), get_availaddr())
+            self.stats_httpd = MyStatsHttpd(*server_addresses)
+            for ht in self.stats_httpd.httpd:
+                self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+                self.assertTrue(ht.address_family in set([socket.AF_INET, socket.AF_INET6]))
+                self.assertTrue(isinstance(ht.socket, socket.socket))
 
         # dual stack (address is ipv6)
-        fake_socket.has_ipv6 = True
-        self.stats_httpd.http_addrs = [ ('::1', 8000) ]
-        self.stats_httpd.open_httpd()
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
+        if self.ipv6_enabled:
+            server_addresses = get_availaddr('::1')
+            self.stats_httpd = MyStatsHttpd(server_addresses)
+            for ht in self.stats_httpd.httpd:
+                self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+                self.assertEqual(ht.address_family, socket.AF_INET6)
+                self.assertTrue(isinstance(ht.socket, socket.socket))
 
-        # dual stack (address is ipv4)
-        fake_socket.has_ipv6 = True
-        self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
-        self.stats_httpd.open_httpd()
+        # dual/single stack (address is ipv4)
+        server_addresses = get_availaddr()
+        self.stats_httpd = MyStatsHttpd(server_addresses)
         for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
+            self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+            self.assertEqual(ht.address_family, socket.AF_INET)
+            self.assertTrue(isinstance(ht.socket, socket.socket))
 
-        # only-ipv4 single stack
-        fake_socket.has_ipv6 = False
-        self.stats_httpd.http_addrs = [ ('127.0.0.1', 8000) ]
-        self.stats_httpd.open_httpd()
+        # any address (IPv4)
+        server_addresses = get_availaddr(address='0.0.0.0')
+        self.stats_httpd = MyStatsHttpd(server_addresses)
         for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
-
-        # only-ipv4 single stack (force set ipv6 )
-        fake_socket.has_ipv6 = False
-        self.stats_httpd.http_addrs = [ ('::1', 8000) ]
-        self.assertRaises(stats_httpd.HttpServerError,
-            self.stats_httpd.open_httpd)
-
-        # hostname
-        self.stats_httpd.http_addrs = [ ('localhost', 8000) ]
-        self.stats_httpd.open_httpd()
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
+            self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+            self.assertEqual(ht.address_family,socket.AF_INET)
+            self.assertTrue(isinstance(ht.socket, socket.socket))
 
-        self.stats_httpd.http_addrs = [ ('my.host.domain', 8000) ]
-        self.stats_httpd.open_httpd()
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(isinstance(ht.socket, fake_socket.socket))
-        self.stats_httpd.close_httpd()
+        # any address (IPv6)
+        if self.ipv6_enabled:
+            server_addresses = get_availaddr(address='::')
+            self.stats_httpd = MyStatsHttpd(server_addresses)
+            for ht in self.stats_httpd.httpd:
+                self.assertTrue(isinstance(ht, stats_httpd.HttpServer))
+                self.assertEqual(ht.address_family,socket.AF_INET6)
+                self.assertTrue(isinstance(ht.socket, socket.socket))
+
+        # existent hostname
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd,
+                          get_availaddr(address='localhost'))
+
+        # nonexistent hostname
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('my.host.domain', 8000))
 
         # over flow of port number
-        self.stats_httpd.http_addrs = [ ('', 80000) ]
-        self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 80000))
+
         # negative
-        self.stats_httpd.http_addrs = [ ('', -8000) ]
-        self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', -8000))
+
         # alphabet
-        self.stats_httpd.http_addrs = [ ('', 'ABCDE') ]
-        self.assertRaises(stats_httpd.HttpServerError, self.stats_httpd.open_httpd)
-
-    def test_start(self):
-        self.stats_httpd.cc_session.group_sendmsg(
-            { 'command': [ "shutdown" ] }, "StatsHttpd")
-        self.stats_httpd.start()
-        self.stats_httpd = stats_httpd.StatsHttpd()
-        self.assertRaises(
-            fake_select.error, self.stats_httpd.start)
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, ('127.0.0.1', 'ABCDE'))
 
-    def test_stop(self):
-        # success case
-        fake_socket._CLOSED = False
-        self.stats_httpd.stop()
+        # Address already in use
+        server_addresses = get_availaddr()
+        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, server_addresses)
+        self.stats_httpd_server.run()
+        self.assertRaises(stats_httpd.HttpServerError, MyStatsHttpd, server_addresses)
+        send_shutdown("StatsHttpd")
+
+    def test_running(self):
+        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+        self.stats_httpd = self.stats_httpd_server.server
         self.assertFalse(self.stats_httpd.running)
-        self.assertIsNone(self.stats_httpd.mccs)
-        for ht in self.stats_httpd.httpd:
-            self.assertTrue(ht.socket._closed)
-        self.assertTrue(self.stats_httpd.cc_session._socket._closed)
+        self.stats_httpd_server.run()
+        self.assertEqual(send_command("status", "StatsHttpd"),
+                         (0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
+        self.assertTrue(self.stats_httpd.running)
+        self.assertEqual(send_shutdown("StatsHttpd"), (0, None))
+        self.assertFalse(self.stats_httpd.running)
+        self.stats_httpd_server.shutdown()
+
         # failure case
-        self.stats_httpd.cc_session._socket._closed = False
-        self.stats_httpd.open_mccs()
-        self.stats_httpd.cc_session._socket._closed = True
-        self.stats_httpd.stop() # No excetion raises
-        self.stats_httpd.cc_session._socket._closed = False
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.cc_session.close()
+        self.assertRaises(ValueError, self.stats_httpd.start)
+
+    def test_failure_with_a_select_error (self):
+        """checks select.error is raised if the exception except
+        errno.EINTR is raised while it's selecting"""
+        def raise_select_except(*args):
+            raise select.error('dummy error')
+        orig_select = stats_httpd.select.select
+        stats_httpd.select.select = raise_select_except
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.assertRaises(select.error, self.stats_httpd.start)
+        stats_httpd.select.select = orig_select
+
+    def test_nofailure_with_errno_EINTR(self):
+        """checks no exception is raised if errno.EINTR is raised
+        while it's selecting"""
+        def raise_select_except(*args):
+            raise select.error(errno.EINTR)
+        orig_select = stats_httpd.select.select
+        stats_httpd.select.select = raise_select_except
+        self.stats_httpd_server = ThreadingServerManager(MyStatsHttpd, get_availaddr())
+        self.stats_httpd_server.run()
+        self.stats_httpd_server.shutdown()
+        stats_httpd.select.select = orig_select
 
     def test_open_template(self):
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
         # successful conditions
         tmpl = self.stats_httpd.open_template(stats_httpd.XML_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
         opts = dict(
             xml_string="<dummy></dummy>",
-            xsd_namespace="http://host/path/to/",
-            xsd_url_path="/path/to/",
             xsl_url_path="/path/to/")
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertTrue(lines.find(opts[n])>0)
         tmpl = self.stats_httpd.open_template(stats_httpd.XSD_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
-        opts = dict(
-            xsd_string="<dummy></dummy>",
-            xsd_namespace="http://host/path/to/")
+        opts = dict(xsd_string="<dummy></dummy>")
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertTrue(lines.find(opts[n])>0)
@@ -346,13 +839,13 @@ class TestStatsHttpd(unittest.TestCase):
             self.stats_httpd.open_template, '/path/to/foo/bar')
 
     def test_commands(self):
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
         self.assertEqual(self.stats_httpd.command_handler("status", None),
                          isc.config.ccsession.create_answer(
                 0, "Stats Httpd is up. (PID " + str(os.getpid()) + ")"))
         self.stats_httpd.running = True
         self.assertEqual(self.stats_httpd.command_handler("shutdown", None),
-                         isc.config.ccsession.create_answer(
-                0, "Stats Httpd is shutting down."))
+                         isc.config.ccsession.create_answer(0))
         self.assertFalse(self.stats_httpd.running)
         self.assertEqual(
             self.stats_httpd.command_handler("__UNKNOWN_COMMAND__", None),
@@ -360,42 +853,48 @@ class TestStatsHttpd(unittest.TestCase):
                 1, "Unknown command: __UNKNOWN_COMMAND__"))
 
     def test_config(self):
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
         self.assertEqual(
             self.stats_httpd.config_handler(dict(_UNKNOWN_KEY_=None)),
             isc.config.ccsession.create_answer(
-                    1, "Unknown known config: _UNKNOWN_KEY_"))
-        self.assertEqual(
-            self.stats_httpd.config_handler(
-                        dict(listen_on=[dict(address="::2",port=8000)])),
-            isc.config.ccsession.create_answer(0))
-        self.assertTrue("listen_on" in self.stats_httpd.config)
-        for addr in self.stats_httpd.config["listen_on"]:
-            self.assertTrue("address" in addr)
-            self.assertTrue("port" in addr)
-            self.assertTrue(addr["address"] == "::2")
-            self.assertTrue(addr["port"] == 8000)
+                1, "unknown item _UNKNOWN_KEY_"))
 
+        addresses = get_availaddr()
         self.assertEqual(
             self.stats_httpd.config_handler(
-                        dict(listen_on=[dict(address="::1",port=80)])),
+                dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
             isc.config.ccsession.create_answer(0))
         self.assertTrue("listen_on" in self.stats_httpd.config)
         for addr in self.stats_httpd.config["listen_on"]:
             self.assertTrue("address" in addr)
             self.assertTrue("port" in addr)
-            self.assertTrue(addr["address"] == "::1")
-            self.assertTrue(addr["port"] == 80)
+            self.assertTrue(addr["address"] == addresses[0])
+            self.assertTrue(addr["port"] == addresses[1])
 
+        if self.ipv6_enabled:
+            addresses = get_availaddr("::1")
+            self.assertEqual(
+                self.stats_httpd.config_handler(
+                dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
+                isc.config.ccsession.create_answer(0))
+            self.assertTrue("listen_on" in self.stats_httpd.config)
+            for addr in self.stats_httpd.config["listen_on"]:
+                self.assertTrue("address" in addr)
+                self.assertTrue("port" in addr)
+                self.assertTrue(addr["address"] == addresses[0])
+                self.assertTrue(addr["port"] == addresses[1])
+
+        addresses = get_availaddr()
         self.assertEqual(
             self.stats_httpd.config_handler(
-                        dict(listen_on=[dict(address="1.2.3.4",port=54321)])),
+                dict(listen_on=[dict(address=addresses[0],port=addresses[1])])),
             isc.config.ccsession.create_answer(0))
         self.assertTrue("listen_on" in self.stats_httpd.config)
         for addr in self.stats_httpd.config["listen_on"]:
             self.assertTrue("address" in addr)
             self.assertTrue("port" in addr)
-            self.assertTrue(addr["address"] == "1.2.3.4")
-            self.assertTrue(addr["port"] == 54321)
+            self.assertTrue(addr["address"] == addresses[0])
+            self.assertTrue(addr["port"] == addresses[1])
         (ret, arg) = isc.config.ccsession.parse_answer(
             self.stats_httpd.config_handler(
                 dict(listen_on=[dict(address="1.2.3.4",port=543210)]))
@@ -403,93 +902,409 @@ class TestStatsHttpd(unittest.TestCase):
         self.assertEqual(ret, 1)
 
     def test_xml_handler(self):
-        orig_get_stats_data = stats_httpd.StatsHttpd.get_stats_data
-        stats_httpd.StatsHttpd.get_stats_data = lambda x: {'foo':'bar'}
-        xml_body1 = stats_httpd.StatsHttpd().open_template(
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "foo",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "bar",
+                        "item_description": "foo is bar",
+                        "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        self.stats_httpd.get_stats_data = lambda x,y: \
+            { 'Dummy' : { 'foo':'bar',
+                          'foo2': [
+                            {
+                                "foo2-1-1" : "bar1",
+                                "foo2-1-2" : 10,
+                                "foo2-1-3" : 9
+                                },
+                            {
+                                "foo2-1-1" : "bar2",
+                                "foo2-1-2" : 8,
+                                "foo2-1-3" : 7
+                                }
+                            ] } }
+        xml_body1 = self.stats_httpd.open_template(
             stats_httpd.XML_TEMPLATE_LOCATION).substitute(
-            xml_string='<foo>bar</foo>',
-            xsd_namespace=stats_httpd.XSD_NAMESPACE,
-            xsd_url_path=stats_httpd.XSD_URL_PATH,
+            xml_string='<bind10:statistics xmlns:bind10="http://bind10.isc.org/bind10" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://bind10.isc.org/bind10 ' + stats_httpd.XSD_URL_PATH + '"><Dummy><foo>bar</foo><foo2><foo2-1><foo2-1-1>bar1</foo2-1-1><foo2-1-2>10</foo2-1-2><foo2-1-3>9</foo2-1-3></foo2-1><foo2-1><foo2-1-1>bar2</foo2-1-1><foo2-1-2>8</foo2-1-2><foo2-1-3>7</foo2-1-3></foo2-1></foo2></Dummy></bind10:statistics>',
             xsl_url_path=stats_httpd.XSL_URL_PATH)
-        xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+        xml_body2 = self.stats_httpd.xml_handler()
         self.assertEqual(type(xml_body1), str)
         self.assertEqual(type(xml_body2), str)
         self.assertEqual(xml_body1, xml_body2)
-        stats_httpd.StatsHttpd.get_stats_data = lambda x: {'bar':'foo'}
-        xml_body2 = stats_httpd.StatsHttpd().xml_handler()
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "bar",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "foo",
+                        "item_description": "bar foo",
+                        "item_title": "Bar"
+                        },
+                   {
+                        "item_name": "bar2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Bar foo",
+                        "item_description": "Bar foo",
+                        "list_item_spec": {
+                            "item_name": "bar2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "bar2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Bar2 1 1",
+                                    "item_description": "Bar foo"
+                                    },
+                                {
+                                    "item_name": "bar2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Bar2 1 2",
+                                    "item_description": "Bar foo"
+                                    },
+                                {
+                                    "item_name": "bar2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Bar2 1 3",
+                                    "item_description": "Bar foo"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        self.stats_httpd.get_stats_data = lambda x,y: \
+            { 'Dummy' : { 'bar':'foo',
+                          'bar2': [
+                            {
+                                "bar2-1-1" : "foo1",
+                                "bar2-1-2" : 10,
+                                "bar2-1-3" : 9
+                                },
+                            {
+                                "bar2-1-1" : "foo2",
+                                "bar2-1-2" : 8,
+                                "bar2-1-3" : 7
+                                }
+                            ] } }
+        xml_body2 = self.stats_httpd.xml_handler()
         self.assertNotEqual(xml_body1, xml_body2)
-        stats_httpd.StatsHttpd.get_stats_data = orig_get_stats_data
 
     def test_xsd_handler(self):
-        orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
-        stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
-            [{
-                "item_name": "foo",
-                "item_type": "string",
-                "item_optional": False,
-                "item_default": "bar",
-                "item_description": "foo is bar",
-                "item_title": "Foo"
-               }]
-        xsd_body1 = stats_httpd.StatsHttpd().open_template(
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "foo",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "bar",
+                        "item_description": "foo is bar",
+                        "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "hoo_time",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "2011-01-01T01:01:01Z",
+                        "item_description": "hoo time",
+                        "item_title": "Hoo Time",
+                        "item_format": "date-time"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        xsd_body1 = self.stats_httpd.open_template(
             stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
-            xsd_string='<all>' \
-                + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
-                + '<annotation><appinfo>Foo</appinfo>' \
-                + '<documentation>foo is bar</documentation>' \
-                + '</annotation></element></all>',
-            xsd_namespace=stats_httpd.XSD_NAMESPACE)
-        xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+            xsd_string='<schema targetNamespace="' + stats_httpd.XSD_NAMESPACE + '" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:bind10="' + stats_httpd.XSD_NAMESPACE + '"><annotation><documentation>XML schema of the statistics data in BIND 10</documentation></annotation><element name="statistics"><annotation><documentation>A set of statistics data</documentation></annotation><complexType><all><element name="Dummy"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo" type="string"><annotation><appinfo>Foo</appinfo><documentation>foo is bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="hoo_time" type="dateTime"><annotation><appinfo>Hoo Time</appinfo><documentation>hoo time</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2"><complexType><sequence><element maxOccurs="unbounded" minOccurs="1" name="foo2-1"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo2-1-1" type="string"><annotation><appinfo>Foo2 1 1</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-2" type="integer"><annotation><appinfo>Foo2 1 2</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-3" type="integer"><annotation><appinfo>Foo2 1 3</appinfo><documentation>Foo bar</documentation></annotation></element></all></complexType></element></sequence></complexType></element></all></complexType></element></all></complexType></element></schema>')
+        xsd_body2 = self.stats_httpd.xsd_handler()
         self.assertEqual(type(xsd_body1), str)
         self.assertEqual(type(xsd_body2), str)
         self.assertEqual(xsd_body1, xsd_body2)
-        stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
-            [{
-                "item_name": "bar",
-                "item_type": "string",
-                "item_optional": False,
-                "item_default": "foo",
-                "item_description": "bar is foo",
-                "item_title": "bar"
-               }]
-        xsd_body2 = stats_httpd.StatsHttpd().xsd_handler()
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "bar",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "foo",
+                        "item_description": "bar is foo",
+                        "item_title": "bar"
+                        },
+                   {
+                        "item_name": "boo_time",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "2012-02-02T02:02:02Z",
+                        "item_description": "boo time",
+                        "item_title": "Boo Time",
+                        "item_format": "date-time"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        xsd_body2 = self.stats_httpd.xsd_handler()
         self.assertNotEqual(xsd_body1, xsd_body2)
-        stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
 
     def test_xsl_handler(self):
-        orig_get_stats_spec = stats_httpd.StatsHttpd.get_stats_spec
-        stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
-            [{
-                "item_name": "foo",
-                "item_type": "string",
-                "item_optional": False,
-                "item_default": "bar",
-                "item_description": "foo is bar",
-                "item_title": "Foo"
-               }]
-        xsl_body1 = stats_httpd.StatsHttpd().open_template(
+        self.stats_httpd = MyStatsHttpd(get_availaddr())
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "foo",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "bar",
+                        "item_description": "foo bar",
+                        "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        xsl_body1 = self.stats_httpd.open_template(
             stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
-            xsl_string='<xsl:template match="*"><tr>' \
-                + '<td class="title" title="foo is bar">Foo</td>' \
-                + '<td><xsl:value-of select="foo" /></td>' \
-                + '</tr></xsl:template>',
+            xsl_string='<xsl:template match="bind10:statistics"><table><tr><th>Module Name</th><th>Module Item</th></tr><xsl:for-each select="Dummy"><tr><td><a href="' + stats_httpd.XML_URL_PATH + '/Dummy">Dummy</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo">Foo</a></td><td><xsl:value-of select="foo" /></td></tr><xsl:for-each select="foo2"><tr><td class="title" title="Foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo2">Foo bar</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><xsl:for-each select="foo2-1"><tr><td class="title" title="">foo2-1</td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="Foo bar">Foo2 1 1</td><td><xsl:value-of select="foo2-1-1" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 2</td><td><xsl:value-of select="foo2-1-2" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 3
 </td><td><xsl:value-of select="foo2-1-3" /></td></tr></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></xsl:template>',
             xsd_namespace=stats_httpd.XSD_NAMESPACE)
-        xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+        xsl_body2 = self.stats_httpd.xsl_handler()
         self.assertEqual(type(xsl_body1), str)
         self.assertEqual(type(xsl_body2), str)
         self.assertEqual(xsl_body1, xsl_body2)
-        stats_httpd.StatsHttpd.get_stats_spec = lambda x: \
-            [{
-                "item_name": "bar",
-                "item_type": "string",
-                "item_optional": False,
-                "item_default": "foo",
-                "item_description": "bar is foo",
-                "item_title": "bar"
-               }]
-        xsl_body2 = stats_httpd.StatsHttpd().xsl_handler()
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "bar",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "foo",
+                        "item_description": "bar is foo",
+                        "item_title": "bar"
+                        }]
+              }
+        xsl_body2 = self.stats_httpd.xsl_handler()
         self.assertNotEqual(xsl_body1, xsl_body2)
-        stats_httpd.StatsHttpd.get_stats_spec = orig_get_stats_spec
 
     def test_for_without_B10_FROM_SOURCE(self):
         # just lets it go through the code without B10_FROM_SOURCE env
@@ -500,8 +1315,6 @@ class TestStatsHttpd(unittest.TestCase):
             imp.reload(stats_httpd)
             os.environ["B10_FROM_SOURCE"] = tmppath
             imp.reload(stats_httpd)
-            stats_httpd.socket = fake_socket
-            stats_httpd.select = fake_select
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index a42c81d..3c8599a 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -13,649 +13,715 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-#
-# Tests for the stats module
-#
+"""
+In each of these tests we start several virtual components. They are
+not the real components, no external processes are started. They are
+just simple mock objects running each in its own thread and pretending
+to be bind10 modules. This helps testing the stats module in a close
+to real environment.
+"""
+
+import unittest
 import os
-import sys
+import threading
+import io
 import time
-import unittest
 import imp
-from isc.cc.session import Session, SessionError
-from isc.config.ccsession import ModuleCCSession, ModuleCCSessionError
-from fake_time import time, strftime, gmtime
-import stats
-stats.time = time
-stats.strftime = strftime
-stats.gmtime = gmtime
-from stats import SessionSubject, CCSessionListener, get_timestamp, get_datetime
-from fake_time import _TEST_TIME_SECS, _TEST_TIME_STRF
-
-if "B10_FROM_SOURCE" in os.environ:
-    TEST_SPECFILE_LOCATION = os.environ["B10_FROM_SOURCE"] +\
-    "/src/bin/stats/tests/testdata/stats_test.spec"
-else:
-    TEST_SPECFILE_LOCATION = "./testdata/stats_test.spec"
 
-class TestStats(unittest.TestCase):
+import stats
+import isc.cc.session
+from test_utils import BaseModules, ThreadingServerManager, MyStats, SignalHandler, send_command, send_shutdown
+
+class TestUtilties(unittest.TestCase):
+    items = [
+        { 'item_name': 'test_int1',  'item_type': 'integer', 'item_default': 12345      },
+        { 'item_name': 'test_real1', 'item_type': 'real',    'item_default': 12345.6789 },
+        { 'item_name': 'test_bool1', 'item_type': 'boolean', 'item_default': True       },
+        { 'item_name': 'test_str1',  'item_type': 'string',  'item_default': 'ABCD'     },
+        { 'item_name': 'test_list1', 'item_type': 'list',    'item_default': [1,2,3],
+          'list_item_spec' : { 'item_name': 'number',   'item_type': 'integer' } },
+        { 'item_name': 'test_map1',  'item_type': 'map',     'item_default': {'a':1,'b':2,'c':3},
+          'map_item_spec'  : [ { 'item_name': 'a',   'item_type': 'integer'},
+                               { 'item_name': 'b',   'item_type': 'integer'},
+                               { 'item_name': 'c', 'item_type': 'integer'} ] },
+        { 'item_name': 'test_int2',  'item_type': 'integer' },
+        { 'item_name': 'test_real2', 'item_type': 'real'    },
+        { 'item_name': 'test_bool2', 'item_type': 'boolean' },
+        { 'item_name': 'test_str2',  'item_type': 'string'  },
+        { 'item_name': 'test_list2', 'item_type': 'list',
+          'list_item_spec' : { 'item_name': 'number',   'item_type': 'integer' } },
+        { 'item_name': 'test_map2',  'item_type': 'map',
+          'map_item_spec'  : [ { 'item_name': 'A', 'item_type': 'integer'},
+                               { 'item_name': 'B', 'item_type': 'integer'},
+                               { 'item_name': 'C', 'item_type': 'integer'} ] },
+        { 'item_name': 'test_none',  'item_type': 'none'    },
+        { 'item_name': 'test_list3', 'item_type': 'list',    'item_default': ["one","two","three"],
+          'list_item_spec' : { 'item_name': 'number', 'item_type': 'string' } },
+        { 'item_name': 'test_map3',  'item_type': 'map',     'item_default': {'a':'one','b':'two','c':'three'},
+          'map_item_spec'  : [ { 'item_name': 'a', 'item_type': 'string'},
+                               { 'item_name': 'b', 'item_type': 'string'},
+                               { 'item_name': 'c', 'item_type': 'string'} ] }
+        ]
 
     def setUp(self):
-        self.session = Session()
-        self.subject = SessionSubject(session=self.session)
-        self.listener = CCSessionListener(self.subject)
-        self.stats_spec = self.listener.cc_session.get_module_spec().get_config_spec()
-        self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
-        self.stats_data = {
-                'report_time' : get_datetime(),
-                'bind10.boot_time' : "1970-01-01T00:00:00Z",
-                'stats.timestamp' : get_timestamp(),
-                'stats.lname' : self.session.lname,
-                'auth.queries.tcp': 0,
-                'auth.queries.udp': 0,
-                "stats.boot_time": get_datetime(),
-                "stats.start_time": get_datetime(),
-                "stats.last_update_time": get_datetime()
-                }
-        # check starting
-        self.assertFalse(self.subject.running)
-        self.subject.start()
-        self.assertTrue(self.subject.running)
-        self.assertEqual(len(self.session.message_queue), 0)
-        self.assertEqual(self.module_name, 'Stats')
-
-    def tearDown(self):
-        # check closing
-        self.subject.stop()
-        self.assertFalse(self.subject.running)
-        self.subject.detach(self.listener)
-        self.listener.stop()
-        self.session.close()
-
-    def test_local_func(self):
-        """
-        Test for local function
-        
-        """
-        # test for result_ok
-        self.assertEqual(type(result_ok()), dict)
-        self.assertEqual(result_ok(), {'result': [0]})
-        self.assertEqual(result_ok(1), {'result': [1]})
-        self.assertEqual(result_ok(0,'OK'), {'result': [0, 'OK']})
-        self.assertEqual(result_ok(1,'Not good'), {'result': [1, 'Not good']})
-        self.assertEqual(result_ok(None,"It's None"), {'result': [None, "It's None"]})
-        self.assertNotEqual(result_ok(), {'RESULT': [0]})
-
-        # test for get_timestamp
-        self.assertEqual(get_timestamp(), _TEST_TIME_SECS)
-
-        # test for get_datetime
-        self.assertEqual(get_datetime(), _TEST_TIME_STRF)
-
-    def test_show_command(self):
-        """
-        Test for show command
-        
-        """
-        # test show command without arg
-        self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        # ignore under 0.9 seconds
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command with arg
-        self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.lname"}]}, "Stats")
-        self.assertEqual(len(self.subject.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.subject.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'stats.lname': self.stats_data['stats.lname']}),
-                         result_data)
-        self.assertEqual(len(self.subject.session.message_queue), 0)
-
-        # test show command with arg which has wrong name
-        self.session.group_sendmsg({"command": [ "show", {"stats_item_name": "stats.dummy"}]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        # ignore under 0.9 seconds
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_set_command(self):
-        """
-        Test for set command
-        
-        """
-        # test set command
-        self.stats_data['auth.queries.udp'] = 54321
-        self.assertEqual(self.stats_data['auth.queries.udp'], 54321)
-        self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
-        self.session.group_sendmsg({ "command": [
-                                      "set", {
-                                          'stats_data': {'auth.queries.udp': 54321 }
-                                      } ] },
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command
-        self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set command 2
-        self.stats_data['auth.queries.udp'] = 0
-        self.assertEqual(self.stats_data['auth.queries.udp'], 0)
-        self.assertEqual(self.stats_data['auth.queries.tcp'], 0)
-        self.session.group_sendmsg({ "command": [ "set", {'stats_data': {'auth.queries.udp': 0}} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command 2
-        self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set command 3
-        self.stats_data['auth.queries.tcp'] = 54322
-        self.assertEqual(self.stats_data['auth.queries.udp'], 0)
-        self.assertEqual(self.stats_data['auth.queries.tcp'], 54322)
-        self.session.group_sendmsg({ "command": [
-                                      "set", {
-                                          'stats_data': {'auth.queries.tcp': 54322 }
-                                      } ] },
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command 3
-        self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_remove_command(self):
-        """
-        Test for remove command
-        
-        """
-        self.session.group_sendmsg({"command":
-                                   [ "remove", {"stats_item_name": 'bind10.boot_time' }]},
-                              "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-        self.assertEqual(self.stats_data.pop('bind10.boot_time'), "1970-01-01T00:00:00Z")
-        self.assertFalse('bind10.boot_time' in self.stats_data)
-
-        # test show command with arg
-        self.session.group_sendmsg({"command":
-                                    [ "show", {"stats_item_name": 'bind10.boot_time'}]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertFalse('bind10.boot_time' in result_data['result'][1])
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_reset_command(self):
-        """
-        Test for reset command
-        
-        """
-        self.session.group_sendmsg({"command": [ "reset" ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command
-        self.session.group_sendmsg({"command": [ "show" ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_status_command(self):
-        """
-        Test for status command
-        
-        """
-        self.session.group_sendmsg({"command": [ "status" ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(0, "I'm alive."),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_unknown_command(self):
-        """
-        Test for unknown command
-        
-        """
-        self.session.group_sendmsg({"command": [ "hoge", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(1, "Unknown command: 'hoge'"),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_shutdown_command(self):
-        """
-        Test for shutdown command
-        
-        """
-        self.session.group_sendmsg({"command": [ "shutdown", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.assertTrue(self.subject.running)
-        self.subject.check()
-        self.assertFalse(self.subject.running)
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
+        self.const_timestamp = 1308730448.965706
+        self.const_timetuple = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+        self.const_datetime = '2011-06-22T08:14:08Z'
+        stats.time = lambda : self.const_timestamp
+        stats.gmtime = lambda : self.const_timetuple
 
-    def test_some_commands(self):
-        """
-        Test for some commands in a row
-        
-        """
-        # test set command
-        self.stats_data['bind10.boot_time'] = '2010-08-02T14:47:56Z'
-        self.assertEqual(self.stats_data['bind10.boot_time'], '2010-08-02T14:47:56Z')
-        self.session.group_sendmsg({ "command": [
-                                      "set", {
-                                          'stats_data': {'bind10.boot_time': '2010-08-02T14:47:56Z' }
-                                      }]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({ "command": [
-                                      "show", { 'stats_item_name': 'bind10.boot_time' }
-                                     ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'bind10.boot_time': '2010-08-02T14:47:56Z'}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'bind10.boot_time': self.stats_data['bind10.boot_time']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set command 2nd
-        self.stats_data['auth.queries.udp'] = 98765
-        self.assertEqual(self.stats_data['auth.queries.udp'], 98765)
-        self.session.group_sendmsg({ "command": [
-                                      "set", { 'stats_data': {
-                                            'auth.queries.udp':
-                                              self.stats_data['auth.queries.udp']
-                                            } } 
-                                     ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({"command": [
-				      "show", {'stats_item_name': 'auth.queries.udp'}
-                                    ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set command 3
-        self.stats_data['auth.queries.tcp'] = 4321
-        self.session.group_sendmsg({"command": [
-                                      "set",
-                                      {'stats_data': {'auth.queries.tcp': 4321 }} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check value
-        self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.tcp'} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'auth.queries.tcp': 4321}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'auth.queries.tcp': self.stats_data['auth.queries.tcp']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        self.session.group_sendmsg({"command": [ "show", {'stats_item_name': 'auth.queries.udp'} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'auth.queries.udp': 98765}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'auth.queries.udp': self.stats_data['auth.queries.udp']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set command 4
-        self.stats_data['auth.queries.tcp'] = 67890
-        self.session.group_sendmsg({"command": [
-                                      "set", {'stats_data': {'auth.queries.tcp': 67890 }} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test show command for all values
-        self.session.group_sendmsg({"command": [ "show", None ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, self.stats_data), result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_some_commands2(self):
-        """
-        Test for some commands in a row using list-type value
-        
-        """
-        self.stats_data['listtype'] = [1, 2, 3]
-        self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
-        self.session.group_sendmsg({ "command": [
-                                      "set", {'stats_data': {'listtype': [1, 2, 3] }}
-                                      ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({ "command": [
-                                      "show", { 'stats_item_name': 'listtype'}
-                                     ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'listtype': [1, 2, 3]}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'listtype': self.stats_data['listtype']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set list-type value
-        self.assertEqual(self.stats_data['listtype'], [1, 2, 3])
-        self.session.group_sendmsg({"command": [
-                                      "set", {'stats_data': {'listtype': [3, 2, 1, 0] }}
-                                    ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({ "command": [
-                                      "show", { 'stats_item_name': 'listtype' }
-                                     ] }, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'listtype': [3, 2, 1, 0]}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_some_commands3(self):
-        """
-        Test for some commands in a row using dictionary-type value
-        
-        """
-        self.stats_data['dicttype'] = {"a": 1, "b": 2, "c": 3}
-        self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
-        self.session.group_sendmsg({ "command": [
-                                      "set", {
-                                          'stats_data': {'dicttype': {"a": 1, "b": 2, "c": 3} }
-                                      }]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' } ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'dicttype': {"a": 1, "b": 2, "c": 3}}),
-                         result_data)
-        self.assertEqual(result_ok(0, {'dicttype': self.stats_data['dicttype']}),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # test set list-type value
-        self.assertEqual(self.stats_data['dicttype'], {"a": 1, "b": 2, "c": 3})
-        self.session.group_sendmsg({"command": [
-                                      "set", {'stats_data': {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }} ]},
-                                   "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-        self.assertEqual(len(self.session.message_queue), 0)
-
-        # check its value
-        self.session.group_sendmsg({ "command": [ "show", { 'stats_item_name': 'dicttype' }]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        result_data = self.session.get_message("Stats", None)
-        self.assertEqual(result_ok(0, {'dicttype': {"a": 3, "b": 2, "c": 1, "d": 0} }),
-                         result_data)
-        self.assertEqual(len(self.session.message_queue), 0)
-
-    def test_config_update(self):
-        """
-        Test for config update
-        
-        """
-        # test show command without arg
-        self.session.group_sendmsg({"command": [ "config_update", {"x-version":999} ]}, "Stats")
-        self.assertEqual(len(self.session.message_queue), 1)
-        self.subject.check()
-        self.assertEqual(result_ok(),
-                         self.session.get_message("Stats", None))
-
-    def test_for_boss(self):
-        last_queue = self.session.old_message_queue.pop()
-        self.assertEqual(
-            last_queue.msg, {'command': ['sendstats']})
+    def test_get_spec_defaults(self):
         self.assertEqual(
-            last_queue.env['group'], 'Boss')
-
-class TestStats2(unittest.TestCase):
+            stats.get_spec_defaults(self.items), {
+                'test_int1'  : 12345              ,
+                'test_real1' : 12345.6789         ,
+                'test_bool1' : True               ,
+                'test_str1'  : 'ABCD'             ,
+                'test_list1' : [1,2,3]            ,
+                'test_map1'  : {'a':1,'b':2,'c':3},
+                'test_int2'  : 0 ,
+                'test_real2' : 0.0,
+                'test_bool2' : False,
+                'test_str2'  : "",
+                'test_list2' : [0],
+                'test_map2'  : { 'A' : 0, 'B' : 0, 'C' : 0 },
+                'test_none'  : None,
+                'test_list3' : [ "one", "two", "three" ],
+                'test_map3'  : { 'a' : 'one', 'b' : 'two', 'c' : 'three' } })
+        self.assertEqual(stats.get_spec_defaults(None), {})
+        self.assertRaises(KeyError, stats.get_spec_defaults, [{'item_name':'Foo'}])
+
+    def test_get_timestamp(self):
+        self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+
+    def test_get_datetime(self):
+        self.assertEqual(stats.get_datetime(), self.const_datetime)
+        self.assertNotEqual(stats.get_datetime(
+                (2011, 6, 22, 8, 23, 40, 2, 173, 0)), self.const_datetime)
+
+class TestCallback(unittest.TestCase):
+    def setUp(self):
+        self.dummy_func = lambda *x, **y : (x, y)
+        self.dummy_args = (1,2,3)
+        self.dummy_kwargs = {'a':1,'b':2,'c':3}
+        self.cback1 = stats.Callback(
+            command=self.dummy_func,
+            args=self.dummy_args,
+            kwargs=self.dummy_kwargs
+            )
+        self.cback2 = stats.Callback(
+            args=self.dummy_args,
+            kwargs=self.dummy_kwargs
+            )
+        self.cback3 = stats.Callback(
+            command=self.dummy_func,
+            kwargs=self.dummy_kwargs
+            )
+        self.cback4 = stats.Callback(
+            command=self.dummy_func,
+            args=self.dummy_args
+            )
+
+    def test_init(self):
+        self.assertEqual((self.cback1.command, self.cback1.args, self.cback1.kwargs),
+                         (self.dummy_func, self.dummy_args, self.dummy_kwargs))
+        self.assertEqual((self.cback2.command, self.cback2.args, self.cback2.kwargs),
+                         (None, self.dummy_args, self.dummy_kwargs))
+        self.assertEqual((self.cback3.command, self.cback3.args, self.cback3.kwargs),
+                         (self.dummy_func, (), self.dummy_kwargs))
+        self.assertEqual((self.cback4.command, self.cback4.args, self.cback4.kwargs),
+                         (self.dummy_func, self.dummy_args, {}))
+
+    def test_call(self):
+        self.assertEqual(self.cback1(), (self.dummy_args, self.dummy_kwargs))
+        self.assertEqual(self.cback1(100, 200), ((100, 200), self.dummy_kwargs))
+        self.assertEqual(self.cback1(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
+        self.assertEqual(self.cback2(), None)
+        self.assertEqual(self.cback3(), ((), self.dummy_kwargs))
+        self.assertEqual(self.cback3(100, 200), ((100, 200), self.dummy_kwargs))
+        self.assertEqual(self.cback3(a=100, b=200), ((), {'a':100, 'b':200}))
+        self.assertEqual(self.cback4(), (self.dummy_args, {}))
+        self.assertEqual(self.cback4(100, 200), ((100, 200), {}))
+        self.assertEqual(self.cback4(a=100, b=200), (self.dummy_args, {'a':100, 'b':200}))
 
+class TestStats(unittest.TestCase):
     def setUp(self):
-        self.session = Session()
-        self.subject = SessionSubject(session=self.session)
-        self.listener = CCSessionListener(self.subject)
-        self.module_name = self.listener.cc_session.get_module_spec().get_module_name()
-        # check starting
-        self.assertFalse(self.subject.running)
-        self.subject.start()
-        self.assertTrue(self.subject.running)
-        self.assertEqual(len(self.session.message_queue), 0)
-        self.assertEqual(self.module_name, 'Stats')
+        # set the signal handler for deadlock
+        self.sig_handler = SignalHandler(self.fail)
+        self.base = BaseModules()
+        self.stats = stats.Stats()
+        self.const_timestamp = 1308730448.965706
+        self.const_datetime = '2011-06-22T08:14:08Z'
+        self.const_default_datetime = '1970-01-01T00:00:00Z'
 
     def tearDown(self):
-        # check closing
-        self.subject.stop()
-        self.assertFalse(self.subject.running)
-        self.subject.detach(self.listener)
-        self.listener.stop()
+        self.base.shutdown()
+        # reset the signal handler
+        self.sig_handler.reset()
+
+    def test_init(self):
+        self.assertEqual(self.stats.module_name, 'Stats')
+        self.assertFalse(self.stats.running)
+        self.assertTrue('command_show' in self.stats.callbacks)
+        self.assertTrue('command_status' in self.stats.callbacks)
+        self.assertTrue('command_shutdown' in self.stats.callbacks)
+        self.assertTrue('command_show' in self.stats.callbacks)
+        self.assertTrue('command_showschema' in self.stats.callbacks)
+        self.assertTrue('command_set' in self.stats.callbacks)
+
+    def test_init_undefcmd(self):
+        spec_str = """\
+{
+  "module_spec": {
+    "module_name": "Stats",
+    "module_description": "Stats daemon",
+    "config_data": [],
+    "commands": [
+      {
+        "command_name": "_undef_command_",
+        "command_description": "a undefined command in stats",
+        "command_args": []
+      }
+    ],
+    "statistics": []
+  }
+}
+"""
+        orig_spec_location = stats.SPECFILE_LOCATION
+        stats.SPECFILE_LOCATION = io.StringIO(spec_str)
+        self.assertRaises(stats.StatsError, stats.Stats)
+        stats.SPECFILE_LOCATION = orig_spec_location
+
+    def test_start(self):
+        # start without err
+        self.stats_server = ThreadingServerManager(MyStats)
+        self.stats = self.stats_server.server
+        self.assertFalse(self.stats.running)
+        self.stats_server.run()
+        self.assertEqual(send_command("status", "Stats"),
+                (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+        self.assertTrue(self.stats.running)
+        self.assertEqual(send_shutdown("Stats"), (0, None))
+        self.assertFalse(self.stats.running)
+        self.stats_server.shutdown()
+
+        # start with err
+        self.stats = stats.Stats()
+        self.stats.update_statistics_data = lambda x,**y: ['an error']
+        self.assertRaises(stats.StatsError, self.stats.start)
+
+    def test_handlers(self):
+        self.stats_server = ThreadingServerManager(MyStats)
+        self.stats = self.stats_server.server
+        self.stats_server.run()
+        # config_handler
+        self.assertEqual(self.stats.config_handler({'foo':'bar'}),
+                         isc.config.create_answer(0))
+
+        # command_handler
+        self.base.boss.server._started.wait()
+        self.base.boss.server._started.clear()
+        self.assertEqual(
+            send_command(
+                'show', 'Stats',
+                params={ 'owner' : 'Boss',
+                  'name'  : 'boot_time' }),
+            (0, {'Boss': {'boot_time': self.const_datetime}}))
+        self.assertEqual(
+            send_command(
+                'set', 'Stats',
+                params={ 'owner' : 'Boss',
+                  'data'  : { 'boot_time' : self.const_datetime } }),
+            (0, None))
+        self.assertEqual(
+            send_command(
+                'show', 'Stats',
+                params={ 'owner' : 'Boss',
+                  'name'  : 'boot_time' }),
+            (0, {'Boss': {'boot_time': self.const_datetime}}))
+        self.assertEqual(
+            send_command('status', 'Stats'),
+            (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+        (rcode, value) = send_command('show', 'Stats')
+        self.assertEqual(rcode, 0)
+        self.assertEqual(len(value), 3)
+        self.assertTrue('Boss' in value)
+        self.assertTrue('Stats' in value)
+        self.assertTrue('Auth' in value)
+        self.assertEqual(len(value['Stats']), 5)
+        self.assertEqual(len(value['Boss']), 1)
+        self.assertTrue('boot_time' in value['Boss'])
+        self.assertEqual(value['Boss']['boot_time'], self.const_datetime)
+        self.assertTrue('report_time' in value['Stats'])
+        self.assertTrue('boot_time' in value['Stats'])
+        self.assertTrue('last_update_time' in value['Stats'])
+        self.assertTrue('timestamp' in value['Stats'])
+        self.assertTrue('lname' in value['Stats'])
+        (rcode, value) = send_command('showschema', 'Stats')
+        self.assertEqual(rcode, 0)
+        self.assertEqual(len(value), 3)
+        self.assertTrue('Boss' in value)
+        self.assertTrue('Stats' in value)
+        self.assertTrue('Auth' in value)
+        self.assertEqual(len(value['Stats']), 5)
+        self.assertEqual(len(value['Boss']), 1)
+        for item in value['Boss']:
+            self.assertTrue(len(item) == 7)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+            self.assertTrue('item_format' in item)
+        for item in value['Stats']:
+            self.assertTrue(len(item) == 6 or len(item) == 7)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+            if len(item) == 7:
+                self.assertTrue('item_format' in item)
 
-    def test_specfile(self):
+        self.assertEqual(
+            send_command('__UNKNOWN__', 'Stats'),
+            (1, "Unknown command: '__UNKNOWN__'"))
+
+        self.stats_server.shutdown()
+
+    def test_update_modules(self):
+        self.assertEqual(len(self.stats.modules), 0)
+        self.stats.update_modules()
+        self.assertTrue('Stats' in self.stats.modules)
+        self.assertTrue('Boss' in self.stats.modules)
+        self.assertFalse('Dummy' in self.stats.modules)
+        my_statistics_data = stats.get_spec_defaults(self.stats.modules['Stats'].get_statistics_spec())
+        self.assertTrue('report_time' in my_statistics_data)
+        self.assertTrue('boot_time' in my_statistics_data)
+        self.assertTrue('last_update_time' in my_statistics_data)
+        self.assertTrue('timestamp' in my_statistics_data)
+        self.assertTrue('lname' in my_statistics_data)
+        self.assertEqual(my_statistics_data['report_time'], self.const_default_datetime)
+        self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+        self.assertEqual(my_statistics_data['last_update_time'], self.const_default_datetime)
+        self.assertEqual(my_statistics_data['timestamp'], 0.0)
+        self.assertEqual(my_statistics_data['lname'], "")
+        my_statistics_data = stats.get_spec_defaults(self.stats.modules['Boss'].get_statistics_spec())
+        self.assertTrue('boot_time' in my_statistics_data)
+        self.assertEqual(my_statistics_data['boot_time'], self.const_default_datetime)
+        orig_parse_answer = stats.isc.config.ccsession.parse_answer
+        stats.isc.config.ccsession.parse_answer = lambda x: (99, 'error')
+        self.assertRaises(stats.StatsError, self.stats.update_modules)
+        stats.isc.config.ccsession.parse_answer = orig_parse_answer
+
+    def test_get_statistics_data(self):
+        my_statistics_data = self.stats.get_statistics_data()
+        self.assertTrue('Stats' in my_statistics_data)
+        self.assertTrue('Boss' in my_statistics_data)
+        self.assertTrue('boot_time' in my_statistics_data['Boss'])
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats')
+        self.assertTrue('Stats' in my_statistics_data)
+        self.assertTrue('report_time' in my_statistics_data['Stats'])
+        self.assertTrue('boot_time' in my_statistics_data['Stats'])
+        self.assertTrue('last_update_time' in my_statistics_data['Stats'])
+        self.assertTrue('timestamp' in my_statistics_data['Stats'])
+        self.assertTrue('lname' in my_statistics_data['Stats'])
+        self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
+        self.assertEqual(my_statistics_data['Stats']['report_time'], self.const_default_datetime)
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
+        self.assertEqual(my_statistics_data['Stats']['boot_time'], self.const_default_datetime)
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
+        self.assertEqual(my_statistics_data['Stats']['last_update_time'], self.const_default_datetime)
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
+        self.assertEqual(my_statistics_data['Stats']['timestamp'], 0.0)
+        my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
+        self.assertEqual(my_statistics_data, {'Stats': {'lname':''}})
+        self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+                          owner='Stats', name='Bar')
+        self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+                          owner='Foo', name='Bar')
+        self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
+                          name='Bar')
+
+    def test_update_statistics_data(self):
+        self.stats.update_statistics_data(owner='Stats', lname='foo at bar')
+        self.assertTrue('Stats' in self.stats.statistics_data)
+        my_statistics_data = self.stats.statistics_data['Stats']
+        self.assertEqual(my_statistics_data['lname'], 'foo at bar')
+        self.stats.update_statistics_data(owner='Stats', last_update_time=self.const_datetime)
+        self.assertTrue('Stats' in self.stats.statistics_data)
+        my_statistics_data = self.stats.statistics_data['Stats']
+        self.assertEqual(my_statistics_data['last_update_time'], self.const_datetime)
+        self.assertEqual(self.stats.update_statistics_data(owner='Stats', lname=0.0),
+                         ['0.0 should be a string'])
+        self.assertEqual(self.stats.update_statistics_data(owner='Dummy', foo='bar'),
+                         ['unknown module name: Dummy'])
+
+    def test_commands(self):
+        # status
+        self.assertEqual(self.stats.command_status(),
+                isc.config.create_answer(
+                0, "Stats is up. (PID " + str(os.getpid()) + ")"))
+
+        # shutdown
+        self.stats.running = True
+        self.assertEqual(self.stats.command_shutdown(),
+                         isc.config.create_answer(0))
+        self.assertFalse(self.stats.running)
+
+    def test_command_show(self):
+        self.assertEqual(self.stats.command_show(owner='Foo', name=None),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Foo, name: None"))
+        self.assertEqual(self.stats.command_show(owner='Foo', name='_bar_'),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Foo, name: _bar_"))
+        self.assertEqual(self.stats.command_show(owner='Foo', name='bar'),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Foo, name: bar"))
+        self.assertEqual(self.stats.command_show(owner='Auth'),
+                         isc.config.create_answer(
+                0, {'Auth':{ 'queries.udp': 0,
+                     'queries.tcp': 0,
+                     'queries.perzone': [{ 'zonename': 'test1.example',
+                                           'queries.udp': 1,
+                                           'queries.tcp': 2 },
+                                         { 'zonename': 'test2.example',
+                                           'queries.udp': 3,
+                                           'queries.tcp': 4 }] }}))
+        self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
+                         isc.config.create_answer(
+                0, {'Auth': {'queries.udp':0}}))
+        self.assertEqual(self.stats.command_show(owner='Auth', name='queries.perzone'),
+                         isc.config.create_answer(
+                0, {'Auth': {'queries.perzone': [{ 'zonename': 'test1.example',
+                      'queries.udp': 1,
+                      'queries.tcp': 2 },
+                    { 'zonename': 'test2.example',
+                      'queries.udp': 3,
+                      'queries.tcp': 4 }]}}))
+        orig_get_timestamp = stats.get_timestamp
+        orig_get_datetime = stats.get_datetime
+        stats.get_timestamp = lambda : self.const_timestamp
+        stats.get_datetime = lambda : self.const_datetime
+        self.assertEqual(stats.get_timestamp(), self.const_timestamp)
+        self.assertEqual(stats.get_datetime(), self.const_datetime)
+        self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
+                             isc.config.create_answer(0, {'Stats': {'report_time':self.const_datetime}}))
+        self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
+        self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
+        stats.get_timestamp = orig_get_timestamp
+        stats.get_datetime = orig_get_datetime
+        self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+            { "module_name": self.stats.module_name,
+              "statistics": [] } )
+        self.assertRaises(
+            stats.StatsError, self.stats.command_show, owner='Foo', name='bar')
+
+    def test_command_showchema(self):
+        (rcode, value) = isc.config.ccsession.parse_answer(
+            self.stats.command_showschema())
+        self.assertEqual(rcode, 0)
+        self.assertEqual(len(value), 3)
+        self.assertTrue('Stats' in value)
+        self.assertTrue('Boss' in value)
+        self.assertTrue('Auth' in value)
+        self.assertFalse('__Dummy__' in value)
+        schema = value['Stats']
+        self.assertEqual(len(schema), 5)
+        for item in schema:
+            self.assertTrue(len(item) == 6 or len(item) == 7)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+            if len(item) == 7:
+                self.assertTrue('item_format' in item)
+
+        schema = value['Boss']
+        self.assertEqual(len(schema), 1)
+        for item in schema:
+            self.assertTrue(len(item) == 7)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+            self.assertTrue('item_format' in item)
+
+        schema = value['Auth']
+        self.assertEqual(len(schema), 3)
+        for item in schema:
+            if item['item_type'] == 'list':
+                self.assertEqual(len(item), 7)
+            else:
+                self.assertEqual(len(item), 6)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+
+        (rcode, value) = isc.config.ccsession.parse_answer(
+            self.stats.command_showschema(owner='Stats'))
+        self.assertEqual(rcode, 0)
+        self.assertTrue('Stats' in value)
+        self.assertFalse('Boss' in value)
+        self.assertFalse('Auth' in value)
+        for item in value['Stats']:
+            self.assertTrue(len(item) == 6 or len(item) == 7)
+            self.assertTrue('item_name' in item)
+            self.assertTrue('item_type' in item)
+            self.assertTrue('item_optional' in item)
+            self.assertTrue('item_default' in item)
+            self.assertTrue('item_title' in item)
+            self.assertTrue('item_description' in item)
+            if len(item) == 7:
+                self.assertTrue('item_format' in item)
+
+        (rcode, value) = isc.config.ccsession.parse_answer(
+            self.stats.command_showschema(owner='Stats', name='report_time'))
+        self.assertEqual(rcode, 0)
+        self.assertTrue('Stats' in value)
+        self.assertFalse('Boss' in value)
+        self.assertFalse('Auth' in value)
+        self.assertEqual(len(value['Stats'][0]), 7)
+        self.assertTrue('item_name' in value['Stats'][0])
+        self.assertTrue('item_type' in value['Stats'][0])
+        self.assertTrue('item_optional' in value['Stats'][0])
+        self.assertTrue('item_default' in value['Stats'][0])
+        self.assertTrue('item_title' in value['Stats'][0])
+        self.assertTrue('item_description' in value['Stats'][0])
+        self.assertTrue('item_format' in value['Stats'][0])
+        self.assertEqual(value['Stats'][0]['item_name'], 'report_time')
+        self.assertEqual(value['Stats'][0]['item_format'], 'date-time')
+
+        self.assertEqual(self.stats.command_showschema(owner='Foo'),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Foo, name: None"))
+        self.assertEqual(self.stats.command_showschema(owner='Foo', name='bar'),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Foo, name: bar"))
+        self.assertEqual(self.stats.command_showschema(owner='Auth'),
+                         isc.config.create_answer(
+                0, {'Auth': [{
+                        "item_default": 0,
+                        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+                        "item_name": "queries.tcp",
+                        "item_optional": False,
+                        "item_title": "Queries TCP",
+                        "item_type": "integer"
+                        },
+                    {
+                        "item_default": 0,
+                        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially",
+                        "item_name": "queries.udp",
+                        "item_optional": False,
+                        "item_title": "Queries UDP",
+                        "item_type": "integer"
+                        },
+                    {
+                        "item_name": "queries.perzone",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1.example",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2.example",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Queries per zone",
+                        "item_description": "Queries per zone",
+                        "list_item_spec": {
+                            "item_name": "zones",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "zonename",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Zonename",
+                                    "item_description": "Zonename"
+                                    },
+                                {
+                                    "item_name": "queries.udp",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Queries UDP per zone",
+                                    "item_description": "A number of UDP query counts per zone"
+                                    },
+                                {
+                                    "item_name": "queries.tcp",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Queries TCP per zone",
+                                    "item_description": "A number of TCP query counts per zone"
+                                    }
+                                ]
+                            }
+                        }]}))
+        self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
+                         isc.config.create_answer(
+                0, {'Auth': [{
+                    "item_default": 0,
+                    "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
+                    "item_name": "queries.tcp",
+                    "item_optional": False,
+                    "item_title": "Queries TCP",
+                    "item_type": "integer"
+                    }]}))
+        self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.perzone'),
+                         isc.config.create_answer(
+                0, {'Auth':[{
+                    "item_name": "queries.perzone",
+                    "item_type": "list",
+                    "item_optional": False,
+                    "item_default": [
+                        {
+                            "zonename" : "test1.example",
+                            "queries.udp" : 1,
+                            "queries.tcp" : 2
+                            },
+                        {
+                            "zonename" : "test2.example",
+                            "queries.udp" : 3,
+                            "queries.tcp" : 4
+                            }
+                    ],
+                    "item_title": "Queries per zone",
+                    "item_description": "Queries per zone",
+                    "list_item_spec": {
+                        "item_name": "zones",
+                        "item_type": "map",
+                        "item_optional": False,
+                        "item_default": {},
+                        "map_item_spec": [
+                            {
+                                "item_name": "zonename",
+                                "item_type": "string",
+                                "item_optional": False,
+                                "item_default": "",
+                                "item_title": "Zonename",
+                                "item_description": "Zonename"
+                                },
+                            {
+                                "item_name": "queries.udp",
+                                "item_type": "integer",
+                                "item_optional": False,
+                                "item_default": 0,
+                                "item_title": "Queries UDP per zone",
+                                "item_description": "A number of UDP query counts per zone"
+                                },
+                            {
+                                "item_name": "queries.tcp",
+                                "item_type": "integer",
+                                "item_optional": False,
+                                "item_default": 0,
+                                "item_title": "Queries TCP per zone",
+                                "item_description": "A number of TCP query counts per zone"
+                                }
+                            ]
+                        }
+                    }]}))
+
+        self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
+                         isc.config.create_answer(
+                1, "specified arguments are incorrect: owner: Stats, name: bar"))
+        self.assertEqual(self.stats.command_showschema(name='bar'),
+                         isc.config.create_answer(
+                1, "module name is not specified"))
+
+    def test_command_set(self):
+        orig_get_datetime = stats.get_datetime
+        stats.get_datetime = lambda : self.const_datetime
+        (rcode, value) = isc.config.ccsession.parse_answer(
+            self.stats.command_set(owner='Boss',
+                                   data={ 'boot_time' : self.const_datetime }))
+        stats.get_datetime = orig_get_datetime
+        self.assertEqual(rcode, 0)
+        self.assertTrue(value is None)
+        self.assertEqual(self.stats.statistics_data['Boss']['boot_time'],
+                         self.const_datetime)
+        self.assertEqual(self.stats.statistics_data['Stats']['last_update_time'],
+                         self.const_datetime)
+        self.assertEqual(self.stats.command_set(owner='Stats',
+                                                data={ 'lname' : 'foo at bar' }),
+                         isc.config.create_answer(0, None))
+        self.stats.statistics_data['Stats'] = {}
+        self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+            { "module_name": self.stats.module_name,
+              "statistics": [] } )
+        self.assertEqual(self.stats.command_set(owner='Stats',
+                                                data={ 'lname' : '_foo_ at _bar_' }),
+                         isc.config.create_answer(
+                1,
+                "errors while setting statistics data: unknown item lname"))
+        self.stats.statistics_data['Stats'] = {}
+        self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+            { "module_name": self.stats.module_name } )
+        self.assertEqual(self.stats.command_set(owner='Stats',
+                                                data={ 'lname' : '_foo_ at _bar_' }),
+                         isc.config.create_answer(
+                1,
+                "errors while setting statistics data: No statistics specification"))
+        self.stats.statistics_data['Stats'] = {}
+        self.stats.mccs.specification = isc.config.module_spec.ModuleSpec(
+            { "module_name": self.stats.module_name,
+              "statistics": [
+                    {
+                        "item_name": "dummy",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "",
+                        "item_title": "Local Name",
+                        "item_description": "brabra"
+                        } ] } )
+        self.assertRaises(stats.StatsError,
+                          self.stats.command_set, owner='Stats', data={ 'dummy' : '_xxxx_yyyy_zzz_' })
+
+class TestOSEnv(unittest.TestCase):
+    def test_osenv(self):
         """
-        Test for specfile
-        
+        test for the environ variable "B10_FROM_SOURCE"
+        "B10_FROM_SOURCE" is set in Makefile
         """
-        if "B10_FROM_SOURCE" in os.environ:
-            self.assertEqual(stats.SPECFILE_LOCATION,
+        # test case having B10_FROM_SOURCE
+        self.assertTrue("B10_FROM_SOURCE" in os.environ)
+        self.assertEqual(stats.SPECFILE_LOCATION, \
                              os.environ["B10_FROM_SOURCE"] + os.sep + \
-                                 "src" + os.sep + "bin" + os.sep + "stats" + \
-                                 os.sep + "stats.spec")
-            self.assertEqual(stats.SCHEMA_SPECFILE_LOCATION,
-                             os.environ["B10_FROM_SOURCE"] + os.sep + \
-                                 "src" + os.sep + "bin" + os.sep + "stats" + \
-                                 os.sep + "stats-schema.spec")
+                             "src" + os.sep + "bin" + os.sep + "stats" + \
+                             os.sep + "stats.spec")
+        # test case not having B10_FROM_SOURCE
+        path = os.environ["B10_FROM_SOURCE"]
+        os.environ.pop("B10_FROM_SOURCE")
+        self.assertFalse("B10_FROM_SOURCE" in os.environ)
+        # import stats again
+        imp.reload(stats)
+        # revert the changes
+        os.environ["B10_FROM_SOURCE"] = path
         imp.reload(stats)
-        # change path of SPECFILE_LOCATION
-        stats.SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
-        stats.SCHEMA_SPECFILE_LOCATION = TEST_SPECFILE_LOCATION
-        self.assertEqual(stats.SPECFILE_LOCATION, TEST_SPECFILE_LOCATION)
-        self.subject = stats.SessionSubject(session=self.session)
-        self.session = self.subject.session
-        self.listener = stats.CCSessionListener(self.subject)
-
-        self.assertEqual(self.listener.stats_spec, [])
-        self.assertEqual(self.listener.stats_data, {})
-
-        self.assertEqual(self.listener.commands_spec, [
-                {
-                    "command_name": "status",
-                    "command_description": "identify whether stats module is alive or not",
-                    "command_args": []
-                },
-                {
-                    "command_name": "the_dummy",
-                    "command_description": "this is for testing",
-                    "command_args": []
-                }])
-
-    def test_func_initialize_data(self):
-        """
-        Test for initialize_data function 
-        
-        """
-        # prepare for sample data set
-        stats_spec = [
-            {
-                "item_name": "none_sample",
-                "item_type": "null",
-                "item_default": "None"
-            },
-            {
-                "item_name": "boolean_sample",
-                "item_type": "boolean",
-                "item_default": True
-            },
-            {
-                "item_name": "string_sample",
-                "item_type": "string",
-                "item_default": "A something"
-            },
-            {
-                "item_name": "int_sample",
-                "item_type": "integer",
-                "item_default": 9999999
-            },
-            {
-                "item_name": "real_sample",
-                "item_type": "real",
-                "item_default": 0.0009
-            },
-            {
-                "item_name": "list_sample",
-                "item_type": "list",
-                "item_default": [0, 1, 2, 3, 4],
-                "list_item_spec": []
-            },
-            {
-                "item_name": "map_sample",
-                "item_type": "map",
-                "item_default": {'name':'value'},
-                "map_item_spec": []
-            },
-            {
-                "item_name": "other_sample",
-                "item_type": "__unknown__",
-                "item_default": "__unknown__"
-            }
-        ]
-        # data for comparison
-        stats_data = {
-            'none_sample': None,
-            'boolean_sample': True,
-            'string_sample': 'A something',
-            'int_sample': 9999999,
-            'real_sample': 0.0009,
-            'list_sample': [0, 1, 2, 3, 4],
-            'map_sample': {'name':'value'},
-            'other_sample': '__unknown__'
-        }
-        self.assertEqual(self.listener.initialize_data(stats_spec), stats_data)
-
-    def test_func_main(self):
-        # explicitly make failed
-        self.session.close()
-        stats.main(session=self.session)
 
-    def test_osenv(self):
-        """
-        test for not having environ "B10_FROM_SOURCE"
-        """
-        if "B10_FROM_SOURCE" in os.environ:
-            path = os.environ["B10_FROM_SOURCE"]
-            os.environ.pop("B10_FROM_SOURCE")
-            imp.reload(stats)
-            os.environ["B10_FROM_SOURCE"] = path
-            imp.reload(stats)
-
-def result_ok(*args):
-    if args:
-        return { 'result': list(args) }
-    else:
-        return { 'result': [ 0 ] }
+def test_main():
+    unittest.main()
 
 if __name__ == "__main__":
-    unittest.main()
+    test_main()
diff --git a/src/bin/stats/tests/fake_select.py b/src/bin/stats/tests/fake_select.py
deleted file mode 100644
index ca0ca82..0000000
--- a/src/bin/stats/tests/fake_select.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of select
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-import errno
-
-class error(Exception):
-    pass
-
-def select(rlst, wlst, xlst, timeout):
-    if type(timeout) != int and type(timeout) != float:
-            raise TypeError("Error: %s must be integer or float"
-                            % timeout.__class__.__name__)
-    for s in rlst + wlst + xlst:
-        if type(s) != fake_socket.socket:
-            raise TypeError("Error: %s must be a dummy socket"
-                            % s.__class__.__name__)
-        s._called = s._called + 1
-        if s._called > 3:
-            raise error("Something is happened!")
-        elif s._called > 2:
-            raise error(errno.EINTR)
-    return (rlst, wlst, xlst)
diff --git a/src/bin/stats/tests/fake_socket.py b/src/bin/stats/tests/fake_socket.py
deleted file mode 100644
index 4e3a458..0000000
--- a/src/bin/stats/tests/fake_socket.py
+++ /dev/null
@@ -1,70 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of socket
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import re
-
-AF_INET = 'AF_INET'
-AF_INET6 = 'AF_INET6'
-_ADDRFAMILY = AF_INET
-has_ipv6 = True
-_CLOSED = False
-
-class gaierror(Exception):
-    pass
-
-class error(Exception):
-    pass
-
-class socket:
-
-    def __init__(self, family=None):
-        if family is None:
-            self.address_family = _ADDRFAMILY
-        else:
-            self.address_family = family
-        self._closed = _CLOSED
-        if self._closed:
-            raise error('socket is already closed!')
-        self._called = 0
-
-    def close(self):
-        self._closed = True
-
-    def fileno(self):
-        return id(self)
-
-    def bind(self, server_class):
-        (self.server_address, self.server_port) = server_class
-        if self.address_family not in set([AF_INET, AF_INET6]):
-            raise error("Address family not supported by protocol: %s" % self.address_family)
-        if self.address_family == AF_INET6 and not has_ipv6:
-            raise error("Address family not supported in this machine: %s has_ipv6: %s"
-                        % (self.address_family, str(has_ipv6)))
-        if self.address_family == AF_INET and re.search(':', self.server_address) is not None:
-            raise gaierror("Address family for hostname not supported : %s %s" % (self.server_address, self.address_family))
-        if self.address_family == AF_INET6 and re.search(':', self.server_address) is None:
-            raise error("Cannot assign requested address : %s" % str(self.server_address))
-        if type(self.server_port) is not int:
-            raise TypeError("an integer is required: %s" % str(self.server_port))
-        if self.server_port < 0 or self.server_port > 65535:
-            raise OverflowError("port number must be 0-65535.: %s" % str(self.server_port))
diff --git a/src/bin/stats/tests/fake_time.py b/src/bin/stats/tests/fake_time.py
deleted file mode 100644
index 65e0237..0000000
--- a/src/bin/stats/tests/fake_time.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-__version__ = "$Revision$"
-
-# This is a dummy time class against a Python standard time class.
-# It is just testing use only.
-# Other methods which time class has is not implemented.
-# (This class isn't orderloaded for time class.)
-
-# These variables are constant. These are example.
-_TEST_TIME_SECS = 1283364938.229088
-_TEST_TIME_STRF = '2010-09-01T18:15:38Z'
-
-def time():
-    """
-    This is a dummy time() method against time.time()
-    """
-    # return float constant value
-    return _TEST_TIME_SECS
-
-def gmtime():
-    """
-    This is a dummy gmtime() method against time.gmtime()
-    """
-    # always return nothing
-    return None
-
-def strftime(*arg):
-    """
-    This is a dummy gmtime() method against time.gmtime()
-    """
-    return _TEST_TIME_STRF
-
-
diff --git a/src/bin/stats/tests/http/Makefile.am b/src/bin/stats/tests/http/Makefile.am
deleted file mode 100644
index 79263a9..0000000
--- a/src/bin/stats/tests/http/Makefile.am
+++ /dev/null
@@ -1,6 +0,0 @@
-EXTRA_DIST = __init__.py server.py
-CLEANFILES = __init__.pyc server.pyc
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/http/__init__.py b/src/bin/stats/tests/http/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/http/server.py b/src/bin/stats/tests/http/server.py
deleted file mode 100644
index 70ed6fa..0000000
--- a/src/bin/stats/tests/http/server.py
+++ /dev/null
@@ -1,96 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of http.server
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import fake_socket
-
-class DummyHttpResponse:
-    def __init__(self, path):
-        self.path = path
-        self.headers={}
-        self.log = ""
-
-    def _write_log(self, msg):
-        self.log = self.log + msg
-
-class HTTPServer:
-    """
-    A mock-up class of http.server.HTTPServer
-    """
-    address_family = fake_socket.AF_INET
-    def __init__(self, server_class, handler_class):
-        self.socket = fake_socket.socket(self.address_family)
-        self.server_class = server_class
-        self.socket.bind(self.server_class)
-        self._handler = handler_class(None, None, self)
-
-    def handle_request(self):
-        pass
-
-    def server_close(self):
-        self.socket.close()
-
-class BaseHTTPRequestHandler:
-    """
-    A mock-up class of http.server.BaseHTTPRequestHandler
-    """
-
-    def __init__(self, request, client_address, server):
-        self.path = "/path/to"
-        self.headers = {}
-        self.server = server
-        self.response = DummyHttpResponse(path=self.path)
-        self.response.write = self._write
-        self.wfile = self.response
-
-    def send_response(self, code=0):
-        if self.path != self.response.path:
-            self.response = DummyHttpResponse(path=self.path)
-        self.response.code = code
-
-    def send_header(self, key, value):
-        if self.path != self.response.path:
-            self.response = DummyHttpResponse(path=self.path)
-        self.response.headers[key] = value
-
-    def end_headers(self):
-        if self.path != self.response.path:
-            self.response = DummyHttpResponse(path=self.path)
-        self.response.wrote_headers = True
-
-    def send_error(self, code, message=None):
-        if self.path != self.response.path:
-            self.response = DummyHttpResponse(path=self.path)
-        self.response.code = code
-        self.response.body = message
-
-    def address_string(self):
-        return 'dummyhost'
-
-    def log_date_time_string(self):
-        return '[DD/MM/YYYY HH:MI:SS]'
-
-    def _write(self, obj):
-        if self.path != self.response.path:
-            self.response = DummyHttpResponse(path=self.path)
-        self.response.body = obj.decode()
-
diff --git a/src/bin/stats/tests/isc/Makefile.am b/src/bin/stats/tests/isc/Makefile.am
deleted file mode 100644
index d31395d..0000000
--- a/src/bin/stats/tests/isc/Makefile.am
+++ /dev/null
@@ -1,8 +0,0 @@
-SUBDIRS = cc config util log
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/__init__.py b/src/bin/stats/tests/isc/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/cc/Makefile.am b/src/bin/stats/tests/isc/cc/Makefile.am
deleted file mode 100644
index 67323b5..0000000
--- a/src/bin/stats/tests/isc/cc/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py session.py
-CLEANFILES = __init__.pyc session.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/cc/__init__.py b/src/bin/stats/tests/isc/cc/__init__.py
deleted file mode 100644
index 9a3eaf6..0000000
--- a/src/bin/stats/tests/isc/cc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.cc.session import *
diff --git a/src/bin/stats/tests/isc/cc/session.py b/src/bin/stats/tests/isc/cc/session.py
deleted file mode 100644
index e16d6a9..0000000
--- a/src/bin/stats/tests/isc/cc/session.py
+++ /dev/null
@@ -1,148 +0,0 @@
-# Copyright (C) 2010,2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import sys
-import fake_socket
-
-# set a dummy lname
-_TEST_LNAME = '123abc at xxxx'
-
-class Queue():
-    def __init__(self, msg=None, env={}):
-        self.msg = msg
-        self.env = env
-
-    def dump(self):
-        return { 'msg': self.msg, 'env': self.env }
-               
-class SessionError(Exception):
-    pass
-
-class SessionTimeout(Exception):
-    pass
-
-class Session:
-    def __init__(self, socket_file=None, verbose=False):
-        self._lname = _TEST_LNAME
-        self.message_queue = []
-        self.old_message_queue = []
-        try:
-            self._socket = fake_socket.socket()
-        except fake_socket.error as se:
-            raise SessionError(se)
-        self.verbose = verbose
-
-    @property
-    def lname(self):
-        return self._lname
-
-    def close(self):
-        self._socket.close()
-
-    def _clear_queues(self):
-        while len(self.message_queue) > 0:
-            self.dequeue()
-
-    def _next_sequence(self, que=None):
-        return len(self.message_queue)
-
-    def enqueue(self, msg=None, env={}):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
-        seq = self._next_sequence()
-        env.update({"seq": 0}) # fixed here
-        que = Queue(msg=msg, env=env)
-        self.message_queue.append(que)
-        if self.verbose:
-            sys.stdout.write("[Session] enqueue: " + str(que.dump()) + "\n")
-        return seq
-
-    def dequeue(self):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
-        que = None
-        try:
-            que = self.message_queue.pop(0) # always pop at index 0
-            self.old_message_queue.append(que)
-        except IndexError:
-            que = Queue()
-        if self.verbose:
-            sys.stdout.write("[Session] dequeue: " + str(que.dump()) + "\n")
-        return que
-
-    def get_queue(self, seq=None):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
-        if seq is None:
-            seq = len(self.message_queue) - 1
-        que = None
-        try:
-            que = self.message_queue[seq]
-        except IndexError:
-            raise IndexError
-            que = Queue()
-        if self.verbose:
-            sys.stdout.write("[Session] get_queue: " + str(que.dump()) + "\n")
-        return que
-
-    def group_sendmsg(self, msg, group, instance="*", to="*"):
-        return self.enqueue(msg=msg, env={
-                "type": "send",
-                "from": self._lname,
-                "to": to,
-                "group": group,
-                "instance": instance })
-
-    def group_recvmsg(self, nonblock=True, seq=0):
-        que = self.dequeue()
-        return que.msg, que.env
-        
-    def group_reply(self, routing, msg):
-        return self.enqueue(msg=msg, env={
-                "type": "send",
-                "from": self._lname,
-                "to": routing["from"],
-                "group": routing["group"],
-                "instance": routing["instance"],
-                "reply": routing["seq"] })
-
-    def get_message(self, group, to='*'):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
-        que = Queue()
-        for q in self.message_queue:
-            if q.env['group'] == group:
-                self.message_queue.remove(q)
-                self.old_message_queue.append(q)
-                que = q
-        if self.verbose:
-            sys.stdout.write("[Session] get_message: " + str(que.dump()) + "\n")
-        return q.msg
-
-    def group_subscribe(self, group, instance = "*"):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
-
-    def group_unsubscribe(self, group, instance = "*"):
-        if self._socket._closed:
-            raise SessionError("Session has been closed.")
diff --git a/src/bin/stats/tests/isc/config/Makefile.am b/src/bin/stats/tests/isc/config/Makefile.am
deleted file mode 100644
index ffbecda..0000000
--- a/src/bin/stats/tests/isc/config/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py ccsession.py
-CLEANFILES = __init__.pyc ccsession.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/config/__init__.py b/src/bin/stats/tests/isc/config/__init__.py
deleted file mode 100644
index 4c49e95..0000000
--- a/src/bin/stats/tests/isc/config/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from isc.config.ccsession import *
diff --git a/src/bin/stats/tests/isc/config/ccsession.py b/src/bin/stats/tests/isc/config/ccsession.py
deleted file mode 100644
index a4e9c37..0000000
--- a/src/bin/stats/tests/isc/config/ccsession.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (C) 2010,2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A mock-up module of isc.cc.session
-
-*** NOTE ***
-It is only for testing stats_httpd module and not reusable for
-external module.
-"""
-
-import json
-import os
-from isc.cc.session import Session
-
-COMMAND_CONFIG_UPDATE = "config_update"
-
-def parse_answer(msg):
-    assert 'result' in msg
-    try:
-        return msg['result'][0], msg['result'][1]
-    except IndexError:
-        return msg['result'][0], None
-
-def create_answer(rcode, arg = None):
-    if arg is None:
-        return { 'result': [ rcode ] }
-    else:
-        return { 'result': [ rcode, arg ] }
-
-def parse_command(msg):
-    assert 'command' in msg
-    try:
-        return msg['command'][0], msg['command'][1]
-    except IndexError:
-        return msg['command'][0], None
-
-def create_command(command_name, params = None):
-    if params is None:
-        return {"command": [command_name]}
-    else:
-        return {"command": [command_name, params]}
-
-def module_spec_from_file(spec_file, check = True):
-    try:
-        file = open(spec_file)
-        json_str = file.read()
-        module_spec = json.loads(json_str)
-        file.close()
-        return ModuleSpec(module_spec['module_spec'], check)
-    except IOError as ioe:
-        raise ModuleSpecError("JSON read error: " + str(ioe))
-    except ValueError as ve:
-        raise ModuleSpecError("JSON parse error: " + str(ve))
-    except KeyError as err:
-        raise ModuleSpecError("Data definition has no module_spec element")
-
-class ModuleSpecError(Exception):
-    pass
-
-class ModuleSpec:
-    def __init__(self, module_spec, check = True):
-        self._module_spec = module_spec
-
-    def get_config_spec(self):
-        return self._module_spec['config_data']
-
-    def get_commands_spec(self):
-        return self._module_spec['commands']
-
-    def get_module_name(self):
-        return self._module_spec['module_name']
-
-class ModuleCCSessionError(Exception):
-    pass
-
-class DataNotFoundError(Exception):
-    pass
-
-class ConfigData:
-    def __init__(self, specification):
-        self.specification = specification
-
-    def get_value(self, identifier):
-        """Returns a tuple where the first item is the value at the
-           given identifier, and the second item is absolutely False
-           even if the value is an unset default or not. Raises an
-           DataNotFoundError if the identifier is not found in the
-           specification file.
-           *** NOTE ***
-           There are some differences from the original method. This
-           method never handles local settings like the original
-           method. But these different behaviors aren't so big issues
-           for a mock-up method of stats_httpd because stats_httpd
-           calls this method at only first."""
-        for config_map in self.get_module_spec().get_config_spec():
-            if config_map['item_name'] == identifier:
-                if 'item_default' in config_map:
-                    return config_map['item_default'], False
-        raise DataNotFoundError("item_name %s is not found in the specfile" % identifier)
-
-    def get_module_spec(self):
-        return self.specification
-
-class ModuleCCSession(ConfigData):
-    def __init__(self, spec_file_name, config_handler, command_handler, cc_session = None):
-        module_spec = module_spec_from_file(spec_file_name)
-        ConfigData.__init__(self, module_spec)
-        self._module_name = module_spec.get_module_name()
-        self.set_config_handler(config_handler)
-        self.set_command_handler(command_handler)
-        if not cc_session:
-            self._session = Session(verbose=True)
-        else:
-            self._session = cc_session
-
-    def start(self):
-        pass
-
-    def close(self):
-        self._session.close()
-
-    def check_command(self, nonblock=True):
-        msg, env = self._session.group_recvmsg(nonblock)
-        if not msg or 'result' in msg:
-            return
-        cmd, arg = parse_command(msg)
-        answer = None
-        if cmd == COMMAND_CONFIG_UPDATE and self._config_handler:
-            answer = self._config_handler(arg)
-        elif env['group'] == self._module_name and self._command_handler:
-            answer = self._command_handler(cmd, arg)
-        if answer:
-            self._session.group_reply(env, answer)
-
-    def set_config_handler(self, config_handler):
-        self._config_handler = config_handler
-        # should we run this right now since we've changed the handler?
-
-    def set_command_handler(self, command_handler):
-        self._command_handler = command_handler
-
-    def get_module_spec(self):
-        return self.specification
-
-    def get_socket(self):
-        return self._session._socket
-
diff --git a/src/bin/stats/tests/isc/log/Makefile.am b/src/bin/stats/tests/isc/log/Makefile.am
deleted file mode 100644
index 457b9de..0000000
--- a/src/bin/stats/tests/isc/log/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py
-CLEANFILES = __init__.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/log/__init__.py b/src/bin/stats/tests/isc/log/__init__.py
deleted file mode 100644
index 641cf79..0000000
--- a/src/bin/stats/tests/isc/log/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Copyright (C) 2011  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
-    loglibdir = os.path.join(base, 'isc/log/.libs')
-    if os.path.exists(loglibdir):
-        sys.path.insert(0, loglibdir)
-
-from log import *
diff --git a/src/bin/stats/tests/isc/util/Makefile.am b/src/bin/stats/tests/isc/util/Makefile.am
deleted file mode 100644
index 9c74354..0000000
--- a/src/bin/stats/tests/isc/util/Makefile.am
+++ /dev/null
@@ -1,7 +0,0 @@
-EXTRA_DIST = __init__.py process.py
-CLEANFILES = __init__.pyc process.pyc
-
-CLEANDIRS = __pycache__
-
-clean-local:
-	rm -rf $(CLEANDIRS)
diff --git a/src/bin/stats/tests/isc/util/__init__.py b/src/bin/stats/tests/isc/util/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/src/bin/stats/tests/isc/util/process.py b/src/bin/stats/tests/isc/util/process.py
deleted file mode 100644
index 0f764c1..0000000
--- a/src/bin/stats/tests/isc/util/process.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-"""
-A dummy function of isc.util.process.rename()
-"""
-
-def rename(name=None):
-    pass
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
new file mode 100644
index 0000000..3f6ff33
--- /dev/null
+++ b/src/bin/stats/tests/test_utils.py
@@ -0,0 +1,424 @@
+"""
+Utilities and mock modules for unittests of statistics modules
+
+"""
+import os
+import io
+import time
+import sys
+import threading
+import tempfile
+import json
+import signal
+
+import msgq
+import isc.config.cfgmgr
+import stats
+import stats_httpd
+
+class SignalHandler():
+    """A signal handler class for deadlock in unittest"""
+    def __init__(self, fail_handler, timeout=20):
+        """sets a schedule in SIGALRM for invoking the handler via
+        unittest.TestCase after timeout seconds (default is 20)"""
+        self.fail_handler = fail_handler
+        self.orig_handler = signal.signal(signal.SIGALRM, self.sig_handler)
+        signal.alarm(timeout)
+
+    def reset(self):
+        """resets the schedule in SIGALRM"""
+        signal.alarm(0)
+        signal.signal(signal.SIGALRM, self.orig_handler)
+
+    def sig_handler(self, signal, frame):
+        """invokes unittest.TestCase.fail as a signal handler"""
+        self.fail_handler("A deadlock might be detected")
+
+def send_command(command_name, module_name, params=None, session=None, nonblock=False, timeout=None):
+    if session is not None:
+        cc_session = session
+    else:
+        cc_session = isc.cc.Session()
+    if timeout is not None:
+        orig_timeout = cc_session.get_timeout()
+        cc_session.set_timeout(timeout * 1000)
+    command = isc.config.ccsession.create_command(command_name, params)
+    seq = cc_session.group_sendmsg(command, module_name)
+    try:
+        (answer, env) = cc_session.group_recvmsg(nonblock, seq)
+        if answer:
+            return isc.config.ccsession.parse_answer(answer)
+    except isc.cc.SessionTimeout:
+        pass
+    finally:
+        if timeout is not None:
+            cc_session.set_timeout(orig_timeout)
+        if session is None:
+            cc_session.close()
+
+def send_shutdown(module_name, **kwargs):
+    return send_command("shutdown", module_name, **kwargs)
+
+class ThreadingServerManager:
+    def __init__(self, server, *args, **kwargs):
+        self.server = server(*args, **kwargs)
+        self.server_name = server.__name__
+        self.server._thread = threading.Thread(
+            name=self.server_name, target=self.server.run)
+        self.server._thread.daemon = True
+
+    def run(self):
+        self.server._thread.start()
+        self.server._started.wait()
+        self.server._started.clear()
+
+    def shutdown(self):
+        self.server.shutdown()
+        self.server._thread.join(0) # timeout is 0
+
+def do_nothing(*args, **kwargs): pass
+
+class dummy_sys:
+    """Dummy for sys"""
+    class dummy_io:
+        write = do_nothing
+    stdout = stderr = dummy_io()
+
+class MockMsgq:
+    def __init__(self):
+        self._started = threading.Event()
+        # suppress output to stdout and stderr
+        msgq.sys = dummy_sys()
+        msgq.print = do_nothing
+        self.msgq = msgq.MsgQ(verbose=False)
+        result = self.msgq.setup()
+        if result:
+            sys.exit("Error on Msgq startup: %s" % result)
+
+    def run(self):
+        self._started.set()
+        try:
+            self.msgq.run()
+        except Exception:
+            pass
+        finally:
+            # explicitly shut down the socket of the msgq before
+            # shutting down the msgq
+            self.msgq.listen_socket.shutdown(msgq.socket.SHUT_RDWR)
+            self.msgq.shutdown()
+
+    def shutdown(self):
+        # do nothing
+        pass
+
+class MockCfgmgr:
+    def __init__(self):
+        self._started = threading.Event()
+        self.cfgmgr = isc.config.cfgmgr.ConfigManager(
+            os.environ['CONFIG_TESTDATA_PATH'], "b10-config.db")
+        self.cfgmgr.read_config()
+
+    def run(self):
+        self._started.set()
+        try:
+            self.cfgmgr.run()
+        except Exception:
+            pass
+
+    def shutdown(self):
+        self.cfgmgr.running = False
+
+class MockBoss:
+    spec_str = """\
+{
+  "module_spec": {
+    "module_name": "Boss",
+    "module_description": "Mock Master process",
+    "config_data": [],
+    "commands": [
+      {
+        "command_name": "sendstats",
+        "command_description": "Send data to a statistics module at once",
+        "command_args": []
+      }
+    ],
+    "statistics": [
+      {
+        "item_name": "boot_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Boot time",
+        "item_description": "A date time when bind10 process starts initially",
+        "item_format": "date-time"
+      }
+    ]
+  }
+}
+"""
+    _BASETIME = (2011, 6, 22, 8, 14, 8, 2, 173, 0)
+
+    def __init__(self):
+        self._started = threading.Event()
+        self.running = False
+        self.spec_file = io.StringIO(self.spec_str)
+        # create ModuleCCSession object
+        self.mccs = isc.config.ModuleCCSession(
+            self.spec_file,
+            self.config_handler,
+            self.command_handler)
+        self.spec_file.close()
+        self.cc_session = self.mccs._session
+        self.got_command_name = ''
+
+    def run(self):
+        self.mccs.start()
+        self.running = True
+        self._started.set()
+        try:
+            while self.running:
+                self.mccs.check_command(False)
+        except Exception:
+            pass
+
+    def shutdown(self):
+        self.running = False
+
+    def config_handler(self, new_config):
+        return isc.config.create_answer(0)
+
+    def command_handler(self, command, *args, **kwargs):
+        self._started.set()
+        self.got_command_name = command
+        params = { "owner": "Boss",
+                   "data": {
+                'boot_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', self._BASETIME)
+                }
+                   }
+        if command == 'sendstats':
+            send_command("set", "Stats", params=params, session=self.cc_session)
+            return isc.config.create_answer(0)
+        elif command == 'getstats':
+            return isc.config.create_answer(0, params)
+        return isc.config.create_answer(1, "Unknown Command")
+
+class MockAuth:
+    spec_str = """\
+{
+  "module_spec": {
+    "module_name": "Auth",
+    "module_description": "Mock Authoritative service",
+    "config_data": [],
+    "commands": [
+      {
+        "command_name": "sendstats",
+        "command_description": "Send data to a statistics module at once",
+        "command_args": []
+      }
+    ],
+    "statistics": [
+      {
+        "item_name": "queries.tcp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "Queries TCP",
+        "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially"
+      },
+      {
+        "item_name": "queries.udp",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "Queries UDP",
+        "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+      },
+      {
+        "item_name": "queries.perzone",
+        "item_type": "list",
+        "item_optional": false,
+        "item_default": [
+          {
+            "zonename" : "test1.example",
+            "queries.udp" : 1,
+            "queries.tcp" : 2
+          },
+          {
+            "zonename" : "test2.example",
+            "queries.udp" : 3,
+            "queries.tcp" : 4
+          }
+        ],
+        "item_title": "Queries per zone",
+        "item_description": "Queries per zone",
+        "list_item_spec": {
+          "item_name": "zones",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+            {
+              "item_name": "zonename",
+              "item_type": "string",
+              "item_optional": false,
+              "item_default": "",
+              "item_title": "Zonename",
+              "item_description": "Zonename"
+            },
+            {
+              "item_name": "queries.udp",
+              "item_type": "integer",
+              "item_optional": false,
+              "item_default": 0,
+              "item_title": "Queries UDP per zone",
+              "item_description": "A number of UDP query counts per zone"
+            },
+            {
+              "item_name": "queries.tcp",
+              "item_type": "integer",
+              "item_optional": false,
+              "item_default": 0,
+              "item_title": "Queries TCP per zone",
+              "item_description": "A number of TCP query counts per zone"
+            }
+          ]
+        }
+      }
+    ]
+  }
+}
+"""
+    def __init__(self):
+        self._started = threading.Event()
+        self.running = False
+        self.spec_file = io.StringIO(self.spec_str)
+        # create ModuleCCSession object
+        self.mccs = isc.config.ModuleCCSession(
+            self.spec_file,
+            self.config_handler,
+            self.command_handler)
+        self.spec_file.close()
+        self.cc_session = self.mccs._session
+        self.got_command_name = ''
+        self.queries_tcp = 3
+        self.queries_udp = 2
+        self.queries_per_zone = [{
+                'zonename': 'test1.example',
+                'queries.tcp': 5,
+                'queries.udp': 4
+                }]
+
+    def run(self):
+        self.mccs.start()
+        self.running = True
+        self._started.set()
+        try:
+            while self.running:
+                self.mccs.check_command(False)
+        except Exception:
+            pass
+
+    def shutdown(self):
+        self.running = False
+
+    def config_handler(self, new_config):
+        return isc.config.create_answer(0)
+
+    def command_handler(self, command, *args, **kwargs):
+        self.got_command_name = command
+        if command == 'sendstats':
+            params = { "owner": "Auth",
+                       "data": { 'queries.tcp': self.queries_tcp,
+                                 'queries.udp': self.queries_udp,
+                                 'queries.per-zone' : self.queries_per_zone } }
+            return send_command("set", "Stats", params=params, session=self.cc_session)
+        return isc.config.create_answer(1, "Unknown Command")
+
+class MyStats(stats.Stats):
+    def __init__(self):
+        self._started = threading.Event()
+        stats.Stats.__init__(self)
+
+    def run(self):
+        self._started.set()
+        try:
+            self.start()
+        except Exception:
+            pass
+
+    def shutdown(self):
+        self.command_shutdown()
+
+class MyStatsHttpd(stats_httpd.StatsHttpd):
+    ORIG_SPECFILE_LOCATION = stats_httpd.SPECFILE_LOCATION
+    def __init__(self, *server_address):
+        self._started = threading.Event()
+        if server_address:
+            stats_httpd.SPECFILE_LOCATION = self.create_specfile(*server_address)
+            try:
+                stats_httpd.StatsHttpd.__init__(self)
+            finally:
+                if hasattr(stats_httpd.SPECFILE_LOCATION, "close"):
+                    stats_httpd.SPECFILE_LOCATION.close()
+                stats_httpd.SPECFILE_LOCATION = self.ORIG_SPECFILE_LOCATION
+        else:
+            stats_httpd.StatsHttpd.__init__(self)
+
+    def create_specfile(self, *server_address):
+        spec_io = open(self.ORIG_SPECFILE_LOCATION)
+        try:
+            spec = json.load(spec_io)
+            spec_io.close()
+            config = spec['module_spec']['config_data']
+            for i in range(len(config)):
+                if config[i]['item_name'] == 'listen_on':
+                    config[i]['item_default'] = \
+                        [ dict(address=a[0], port=a[1]) for a in server_address ]
+                    break
+            return io.StringIO(json.dumps(spec))
+        finally:
+            spec_io.close()
+
+    def run(self):
+        self._started.set()
+        try:
+            self.start()
+        except Exception:
+            pass
+
+    def shutdown(self):
+        self.command_handler('shutdown', None)
+
+class BaseModules:
+    def __init__(self):
+        # MockMsgq
+        self.msgq = ThreadingServerManager(MockMsgq)
+        self.msgq.run()
+        # Check whether msgq is ready. A SessionTimeout is raised here if not.
+        isc.cc.session.Session().close()
+        # MockCfgmgr
+        self.cfgmgr = ThreadingServerManager(MockCfgmgr)
+        self.cfgmgr.run()
+        # MockBoss
+        self.boss = ThreadingServerManager(MockBoss)
+        self.boss.run()
+        # MockAuth
+        self.auth = ThreadingServerManager(MockAuth)
+        self.auth.run()
+
+    def shutdown(self):
+        # MockAuth
+        self.auth.shutdown()
+        # MockBoss
+        self.boss.shutdown()
+        # MockCfgmgr
+        self.cfgmgr.shutdown()
+        # MockMsgq
+        self.msgq.shutdown()
+        # remove the unused socket file
+        socket_file = self.msgq.server.msgq.socket_file
+        try:
+            if os.path.exists(socket_file):
+                os.remove(socket_file)
+        except OSError:
+            pass
diff --git a/src/bin/stats/tests/testdata/Makefile.am b/src/bin/stats/tests/testdata/Makefile.am
deleted file mode 100644
index 1b8df6d..0000000
--- a/src/bin/stats/tests/testdata/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-EXTRA_DIST = stats_test.spec
diff --git a/src/bin/stats/tests/testdata/stats_test.spec b/src/bin/stats/tests/testdata/stats_test.spec
deleted file mode 100644
index 8136756..0000000
--- a/src/bin/stats/tests/testdata/stats_test.spec
+++ /dev/null
@@ -1,19 +0,0 @@
-{
-  "module_spec": {
-    "module_name": "Stats",
-    "module_description": "Stats daemon",
-    "config_data": [],
-    "commands": [
-      {
-        "command_name": "status",
-        "command_description": "identify whether stats module is alive or not",
-        "command_args": []
-      },
-      {
-        "command_name": "the_dummy",
-        "command_description": "this is for testing",
-        "command_args": []
-      }
-    ]
-  }
-}
diff --git a/src/bin/tests/Makefile.am b/src/bin/tests/Makefile.am
index b5bcea2..41b497f 100644
--- a/src/bin/tests/Makefile.am
+++ b/src/bin/tests/Makefile.am
@@ -1,5 +1,6 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = process_rename_test.py
+noinst_SCRIPTS = $(PYTESTS)
 # .py will be generated by configure, so we don't have to include it
 # in EXTRA_DIST.
 
@@ -7,19 +8,20 @@ PYTESTS = process_rename_test.py
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
 check-local:
 if ENABLE_PYTHON_COVERAGE
-	touch $(abs_top_srcdir)/.coverage 
+	touch $(abs_top_srcdir)/.coverage
 	rm -f .coverage
 	${LN_S} $(abs_top_srcdir)/.coverage .coverage
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	chmod +x $(abs_builddir)/$$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/tests/process_rename_test.py.in b/src/bin/tests/process_rename_test.py.in
index 4b45210..f96c023 100644
--- a/src/bin/tests/process_rename_test.py.in
+++ b/src/bin/tests/process_rename_test.py.in
@@ -38,8 +38,10 @@ class TestRename(unittest.TestCase):
         Then scan them by looking at the source text
         (without actually running them)
         """
-        # Regexp to find all the *_SCRIPTS = something lines,
-        # including line continuations (backslash and newline)
+        # Regexp to find all the *_SCRIPTS = something lines (except for
+        # noinst_SCRIPTS, which are scripts for tests), including line
+        # continuations (backslash and newline)
+        excluded_lines = re.compile(r'^(noinst_SCRIPTS.*$)', re.MULTILINE)
         lines = re.compile(r'^\w+_SCRIPTS\s*=\s*((.|\\\n)*)$',
             re.MULTILINE)
         # Script name regular expression
@@ -53,7 +55,8 @@ class TestRename(unittest.TestCase):
             if 'Makefile' in fs:
                 makefile = ''.join(open(os.path.join(d,
                     "Makefile")).readlines())
-                for (var, _) in lines.findall(makefile):
+                for (var, _) in lines.findall(re.sub(excluded_lines, '',
+                                                     makefile)):
                     for (script, _) in scripts.findall(var):
                         self.__scan(d, script, fun)
 
diff --git a/src/bin/xfrin/Makefile.am b/src/bin/xfrin/Makefile.am
index 0af9be6..8d80b22 100644
--- a/src/bin/xfrin/Makefile.am
+++ b/src/bin/xfrin/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrin
 
 b10_xfrindir = $(pkgdatadir)
 b10_xfrin_DATA = xfrin.spec
-pyexec_DATA = xfrin_messages.py
 
-CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py xfrin_messages.py xfrin_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrin xfrin.pyc xfrinlog.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.pyc
 
 man_MANS = b10-xfrin.8
 EXTRA_DIST = $(man_MANS) b10-xfrin.xml
@@ -22,11 +26,12 @@ b10-xfrin.8: b10-xfrin.xml
 endif
 
 # Define rule to build logging source files from message file
-xfrin_messages.py: xfrin_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrin/xfrin_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py : xfrin_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrin_messages.mes
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrin: xfrin.py xfrin_messages.py
+b10-xfrin: xfrin.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrin_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrin.py >$@
 	chmod a+x $@
diff --git a/src/bin/xfrin/b10-xfrin.8 b/src/bin/xfrin/b10-xfrin.8
index 3ea2293..056103a 100644
--- a/src/bin/xfrin/b10-xfrin.8
+++ b/src/bin/xfrin/b10-xfrin.8
@@ -2,12 +2,12 @@
 .\"     Title: b10-xfrin
 .\"    Author: [FIXME: author] [see http://docbook.sf.net/el/author]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: May 19, 2011
+.\"      Date: October 12, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "B10\-XFRIN" "8" "May 19, 2011" "BIND10" "BIND10"
+.TH "B10\-XFRIN" "8" "October 12, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -29,23 +29,11 @@ The
 \fBb10\-xfrin\fR
 daemon provides the BIND 10 incoming DNS zone transfer service\&. Normally it is started by the
 \fBbind10\fR(8)
-boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data store\&.
-.if n \{\
-.sp
-.\}
-.RS 4
-.it 1 an-trap
-.nr an-no-space-flag 1
-.nr an-break-flag 1
-.br
-.ps +1
-\fBNote\fR
-.ps -1
-.br
-.sp
-This prototype release only supports AXFR\&. IXFR is not implemented\&.
-.sp .5v
-.RE
+boss process\&. When triggered it can request and receive a zone transfer and store the zone in a BIND 10 zone data source\&.
+.PP
+The
+\fBb10\-xfrin\fR
+daemon supports both AXFR and IXFR\&. Due to some implementation limitations of the current development release, however, it only tries AXFR by default, and care should be taken to enable IXFR\&. See the BIND 10 Guide for more details\&.
 .PP
 This daemon communicates with BIND 10 over a
 \fBb10-msgq\fR(8)
@@ -61,7 +49,7 @@ receives its configurations from
 .PP
 The configurable settings are:
 .PP
-\fItransfers\-in\fR
+\fItransfers_in\fR
 defines the maximum number of inbound zone transfers that can run concurrently\&. The default is 10\&.
 .PP
 
@@ -71,10 +59,15 @@ is a list of zones known to the
 daemon\&. The list items are:
 \fIname\fR
 (the zone name),
+\fIclass\fR
+(defaults to
+\(lqIN\(rq),
 \fImaster_addr\fR
 (the zone master to transfer from),
 \fImaster_port\fR
-(defaults to 53), and
+(defaults to 53),
+\fIuse_ixfr\fR
+(defaults to false), and
 \fItsig_key\fR
 (optional TSIG key to use)\&. The
 \fItsig_key\fR
@@ -125,7 +118,7 @@ to define the class (defaults to
 \fImaster\fR
 to define the IP address of the authoritative server to transfer from, and
 \fIport\fR
-to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the values previously defined in the
+to define the port number on the authoritative server (defaults to 53)\&. If the address or port is not specified, it will use the value previously defined in the
 \fIzones\fR
 configuration\&.
 .PP
diff --git a/src/bin/xfrin/b10-xfrin.xml b/src/bin/xfrin/b10-xfrin.xml
index ea4c724..231681c 100644
--- a/src/bin/xfrin/b10-xfrin.xml
+++ b/src/bin/xfrin/b10-xfrin.xml
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>May 19, 2011</date>
+    <date>October 12, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -59,7 +59,7 @@
       <citerefentry><refentrytitle>bind10</refentrytitle><manvolnum>8</manvolnum></citerefentry>
       boss process.
       When triggered it can request and receive a zone transfer and store
-      the zone in a BIND 10 zone data store.
+      the zone in a BIND 10 zone data source.
     </para>
 
 <!-- TODO:
@@ -68,9 +68,14 @@ The logic for handling transfer triggers or zone management is handled
 in separate zonemgr process.
 -->
 
-    <note><simpara>
-      This prototype release only supports AXFR. IXFR is not implemented.
-    </simpara></note>
+    <para>
+      The <command>b10-xfrin</command> daemon supports both AXFR and
+      IXFR.  Due to some implementation limitations of the current
+      development release, however, it only tries AXFR by default,
+      and care should be taken to enable IXFR.
+      See the BIND 10 Guide for more details.
+    </para>
+<!-- TODO: http://bind10.isc.org/ticket/1279 -->
 
     <para>
       This daemon communicates with BIND 10 over a
@@ -92,7 +97,7 @@ in separate zonemgr process.
       The configurable settings are:
     </para>
 
-    <para><varname>transfers-in</varname>
+    <para><varname>transfers_in</varname>
       defines the maximum number of inbound zone transfers
       that can run concurrently. The default is 10.
     </para>
@@ -103,8 +108,10 @@ in separate zonemgr process.
       <command>b10-xfrin</command> daemon.
       The list items are:
       <varname>name</varname> (the zone name),
+      <varname>class</varname> (defaults to <quote>IN</quote>),
       <varname>master_addr</varname> (the zone master to transfer from),
-      <varname>master_port</varname> (defaults to 53), and
+      <varname>master_port</varname> (defaults to 53),
+      <varname>use_ixfr</varname> (defaults to false), and
       <varname>tsig_key</varname> (optional TSIG key to use).
       The <varname>tsig_key</varname> is specified using a full string
       colon-delimited name:key:algorithm representation (e.g.
@@ -152,7 +159,7 @@ in separate zonemgr process.
       according to the SOA's REFRESH time
       to tell <command>b10-xfrin</command> that the zone needs to do
       a zone refresh.
-      This is an internal command and not exposed to the administrator. 
+      This is an internal command and not exposed to the administrator.
 <!-- not defined in spec -->
     </para>
 
@@ -168,7 +175,7 @@ in separate zonemgr process.
       and <varname>port</varname> to define the port number on the
       authoritative server (defaults to 53).
       If the address or port is not specified, it will use the
-      values previously defined in the <varname>zones</varname>
+      value previously defined in the <varname>zones</varname>
       configuration.
      </para>
 <!-- TODO: later hostname for master? -->
@@ -202,7 +209,7 @@ add a usage example of xfrin -->
     </para></note>
 
 <!-- TODO:
- it can handle more than one XFR in now, 
+ it can handle more than one XFR in now,
 but the problem is If SQLITE3 datasource part support multiple write
 operation
 -->
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 0f485aa..cba98ae 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = testdata .
+
 PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = xfrin_test.py
 EXTRA_DIST = $(PYTESTS)
@@ -6,7 +8,10 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/xfr/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +24,9 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
+	TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
+	TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/tests/testdata/Makefile.am b/src/bin/xfrin/tests/testdata/Makefile.am
new file mode 100644
index 0000000..5e325cb
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = example.com # not necessarily needed, but for reference
+EXTRA_DIST += example.com.sqlite3
diff --git a/src/bin/xfrin/tests/testdata/example.com b/src/bin/xfrin/tests/testdata/example.com
new file mode 100644
index 0000000..2afcd28
--- /dev/null
+++ b/src/bin/xfrin/tests/testdata/example.com
@@ -0,0 +1,17 @@
+;; This is the simplest form of zone file for 'example.com', which is the
+;; source of the corresponding sqlite3 DB file.  This file is provided
+;; for reference purposes only; it's not actually used anywhere.
+
+example.com.		3600	IN SOA	master.example.com. admin.example.com. (
+					1230       ; serial
+					3600       ; refresh (1 hour)
+					1800       ; retry (30 minutes)
+					2419200    ; expire (4 weeks)
+					7200       ; minimum (2 hours)
+					)
+			3600	NS	dns01.example.com.
+			3600	NS	dns02.example.com.
+			3600	NS	dns03.example.com.
+dns01.example.com.	3600	IN A	192.0.2.1
+dns02.example.com.	3600	IN A	192.0.2.2
+dns03.example.com.	3600	IN A	192.0.2.3
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
new file mode 100644
index 0000000..3538e3d
Binary files /dev/null and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 92bf1b0..eb2c747 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,11 +14,24 @@
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 import unittest
+import re
+import shutil
 import socket
+import sys
 import io
 from isc.testutils.tsigctx_mock import MockTSIGContext
+from isc.testutils.rrset_utils import *
 from xfrin import *
+import xfrin
+from isc.xfrin.diff import Diff
 import isc.log
+# If we use any python library that is basically a wrapper for
+# a library we use as well (like sqlite3 in our datasources),
+# we must make sure we import ours first; If we have special
+# rpath or libtool rules to pick the correct version, python might
+# choose the wrong one first, if those rules aren't hit first.
+# This would result in missing symbols later.
+import sqlite3
 
 #
 # Commonly used (mostly constant) test parameters
@@ -30,40 +43,155 @@ TEST_RRCLASS_STR = 'IN'
 TEST_DB_FILE = 'db_file'
 TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
 TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV4_ADDRESS, 53))
 TEST_MASTER_IPV6_ADDRESS = '::1'
 TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV6_ADDRESS, 53))
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
+TESTDATA_OBJDIR = os.getenv("TESTDATAOBJDIR")
 # XXX: This should be a non priviledge port that is unlikely to be used.
 # If some other process uses this port test will fail.
 TEST_MASTER_PORT = '53535'
 
 TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
 
+# SOA intended to be used for the new SOA as a result of transfer.
 soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
                   'master.example.com. admin.example.com ' +
                   '1234 3600 1800 2419200 7200')
-soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
-                  RRTTL(3600))
+soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
 soa_rrset.add_rdata(soa_rdata)
-example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
-                                 RRType.AXFR())
-example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS,
-                                 RRType.SOA())
+
+# SOA intended to be used for the current SOA at the secondary side.
+# Note that its serial is smaller than that of soa_rdata.
+begin_soa_rdata = Rdata(RRType.SOA(), TEST_RRCLASS,
+                        'master.example.com. admin.example.com ' +
+                        '1230 3600 1800 2419200 7200')
+begin_soa_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(), RRTTL(3600))
+begin_soa_rrset.add_rdata(begin_soa_rdata)
+example_axfr_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.AXFR())
+example_soa_question = Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA())
 default_questions = [example_axfr_question]
 default_answers = [soa_rrset]
 
+def check_diffs(assert_fn, expected, actual):
+    '''A helper function checking the differences made in the XFR session.
+
+    This is expected to be called from some subclass of unittest.TestCase and
+    assert_fn is generally expected to be 'self.assertEqual' of that class.
+
+    '''
+    assert_fn(len(expected), len(actual))
+    for (diffs_exp, diffs_actual) in zip(expected, actual):
+        assert_fn(len(diffs_exp), len(diffs_actual))
+        for (diff_exp, diff_actual) in zip(diffs_exp, diffs_actual):
+            # operation should match
+            assert_fn(diff_exp[0], diff_actual[0])
+            # The diff as RRset should be equal (for simplicity we assume
+            # all RRsets contain exactly one RDATA)
+            assert_fn(diff_exp[1].get_name(), diff_actual[1].get_name())
+            assert_fn(diff_exp[1].get_type(), diff_actual[1].get_type())
+            assert_fn(diff_exp[1].get_class(), diff_actual[1].get_class())
+            assert_fn(diff_exp[1].get_rdata_count(),
+                      diff_actual[1].get_rdata_count())
+            assert_fn(1, diff_exp[1].get_rdata_count())
+            assert_fn(diff_exp[1].get_rdata()[0],
+                      diff_actual[1].get_rdata()[0])
+
 class XfrinTestException(Exception):
     pass
 
+class XfrinTestTimeoutException(Exception):
+    pass
+
 class MockCC():
     def get_default_value(self, identifier):
+        # The returned values should be identical to the spec file
+        # XXX: these should be retrieved from the spec file
+        # (see MyCCSession of xfrout_test.py.in)
         if identifier == "zones/master_port":
             return TEST_MASTER_PORT
         if identifier == "zones/class":
             return TEST_RRCLASS_STR
+        if identifier == "zones/use_ixfr":
+            return False
+
+class MockDataSourceClient():
+    '''A simple mock data source client.
+
+    This class provides a minimal set of wrappers related to the data source
+    API that would be used by Diff objects.  For our testing purposes they
+    only keep track of the history of the changes.
+
+    '''
+    def __init__(self):
+        self.force_fail = False # if True, raise an exception on commit
+        self.committed_diffs = []
+        self.diffs = []
+
+    def get_class(self):
+        '''Mock version of get_class().
+
+        We simply return the commonly used constant RR class.  If and when
+        we use this mock for a different RR class we need to adjust it
+        accordingly.
+
+        '''
+        return TEST_RRCLASS
+
+    def find_zone(self, zone_name):
+        '''Mock version of find_zone().
+
+        It returns itself (subsequently acting as a mock ZoneFinder) for
+        some test zone names.  For some others it returns either NOTFOUND
+        or PARTIALMATCH.
+
+        '''
+        if zone_name == TEST_ZONE_NAME or \
+                zone_name == Name('no-soa.example') or \
+                zone_name == Name('dup-soa.example'):
+            return (isc.datasrc.DataSourceClient.SUCCESS, self)
+        elif zone_name == Name('no-such-zone.example'):
+            return (DataSourceClient.NOTFOUND, None)
+        elif zone_name == Name('partial-match-zone.example'):
+            return (DataSourceClient.PARTIALMATCH, self)
+        raise ValueError('Unexpected input to mock client: bug in test case?')
+
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
+        '''Mock ZoneFinder.find().
+
+        It returns the predefined SOA RRset to queries for SOA of the common
+        test zone name.  It also emulates some unusual cases for special
+        zone names.
+
+        '''
+        if name == TEST_ZONE_NAME and rrtype == RRType.SOA():
+            return (ZoneFinder.SUCCESS, begin_soa_rrset)
+        if name == Name('no-soa.example'):
+            return (ZoneFinder.NXDOMAIN, None)
+        if name == Name('dup-soa.example'):
+            dup_soa_rrset = RRset(name, TEST_RRCLASS, RRType.SOA(), RRTTL(0))
+            dup_soa_rrset.add_rdata(begin_soa_rdata)
+            dup_soa_rrset.add_rdata(soa_rdata)
+            return (ZoneFinder.SUCCESS, dup_soa_rrset)
+        raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+    def get_updater(self, zone_name, replace, journaling=False):
+        self._journaling_enabled = journaling
+        return self
+
+    def add_rrset(self, rrset):
+        self.diffs.append(('add', rrset))
+
+    def delete_rrset(self, rrset):
+        self.diffs.append(('delete', rrset))
+
+    def commit(self):
+        if self.force_fail:
+            raise isc.datasrc.Error('Updater.commit() failed')
+        self.committed_diffs.append(self.diffs)
+        self.diffs = []
 
 class MockXfrin(Xfrin):
     # This is a class attribute of a callable object that specifies a non
@@ -87,20 +215,21 @@ class MockXfrin(Xfrin):
             MockXfrin.check_command_hook()
 
     def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
-                    tsig_key, check_soa=True):
+                    tsig_key, request_type, check_soa=True):
         # store some of the arguments for verification, then call this
         # method in the superclass
         self.xfrin_started_master_addr = master_addrinfo[2][0]
         self.xfrin_started_master_port = master_addrinfo[2][1]
-        return Xfrin.xfrin_start(self, zone_name, rrclass, db_file,
+        self.xfrin_started_request_type = request_type
+        return Xfrin.xfrin_start(self, zone_name, rrclass, None,
                                  master_addrinfo, tsig_key,
-                                 check_soa)
+                                 request_type, check_soa)
 
 class MockXfrinConnection(XfrinConnection):
-    def __init__(self, sock_map, zone_name, rrclass, db_file, shutdown_event,
-                 master_addr):
-        super().__init__(sock_map, zone_name, rrclass, db_file, shutdown_event,
-                         master_addr)
+    def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
+                 shutdown_event, master_addr, tsig_key=None):
+        super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
+                         shutdown_event, master_addr, TEST_DB_FILE)
         self.query_data = b''
         self.reply_data = b''
         self.force_time_out = False
@@ -121,8 +250,11 @@ class MockXfrinConnection(XfrinConnection):
     def recv(self, size):
         data = self.reply_data[:size]
         self.reply_data = self.reply_data[size:]
+        if len(data) == 0:
+            raise XfrinTestTimeoutException('Emulated timeout')
         if len(data) < size:
-            raise XfrinTestException('cannot get reply data')
+            raise XfrinTestException('cannot get reply data (' + str(size) +
+                                     ' bytes)')
         return data
 
     def send(self, data):
@@ -147,10 +279,11 @@ class MockXfrinConnection(XfrinConnection):
                 self.response_generator()
         return len(data)
 
-    def create_response_data(self, response=True, bad_qid=False,
+    def create_response_data(self, response=True, auth=True, bad_qid=False,
                              rcode=Rcode.NOERROR(),
                              questions=default_questions,
                              answers=default_answers,
+                             authorities=[],
                              tsig_ctx=None):
         resp = Message(Message.RENDER)
         qid = self.qid
@@ -161,8 +294,11 @@ class MockXfrinConnection(XfrinConnection):
         resp.set_rcode(rcode)
         if response:
             resp.set_header_flag(Message.HEADERFLAG_QR)
+        if auth:
+            resp.set_header_flag(Message.HEADERFLAG_AA)
         [resp.add_question(q) for q in questions]
         [resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
+        [resp.add_rrset(Message.SECTION_AUTHORITY, a) for a in authorities]
 
         renderer = MessageRenderer()
         if tsig_ctx is not None:
@@ -174,24 +310,360 @@ class MockXfrinConnection(XfrinConnection):
 
         return reply_data
 
+class TestXfrinState(unittest.TestCase):
+    def setUp(self):
+        self.sock_map = {}
+        self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+                                        TEST_RRCLASS, None, threading.Event(),
+                                        TEST_MASTER_IPV4_ADDRINFO)
+        self.conn.init_socket()
+        self.begin_soa = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+                               RRTTL(3600))
+        self.begin_soa.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS,
+                                       'm. r. 1230 0 0 0 0'))
+        self.ns_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(),
+                              RRTTL(3600))
+        self.ns_rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS,
+                                      'ns.example.com'))
+        self.a_rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.A(),
+                             RRTTL(3600))
+        self.a_rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, '192.0.2.1'))
+
+        self.conn._datasrc_client = MockDataSourceClient()
+        self.conn._diff = Diff(self.conn._datasrc_client, TEST_ZONE_NAME)
+
+class TestXfrinStateBase(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+
+    def test_handle_rr_on_base(self):
+        # The base version of handle_rr() isn't supposed to be called
+        # directly (the argument doesn't matter in this test)
+        self.assertRaises(XfrinException, XfrinState().handle_rr, None)
+
+class TestXfrinInitialSOA(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinInitialSOA()
+
+    def test_handle_rr(self):
+        # normal case
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+        self.assertEqual(1234, self.conn._end_serial.get_value())
+
+    def test_handle_not_soa(self):
+        # The given RR is not of SOA
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_handle_ixfr_uptodate(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate2(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate3(self):
+        # Similar to the previous case, but checking serial number arithmetic
+        # comparison
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(0xffffffff)
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_axfr_uptodate(self):
+        # "request serial" should matter only for IXFR
+        self.conn._request_type = RRType.AXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinFirstData(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinFirstData()
+        self.conn._request_type = RRType.IXFR()
+        # arbitrary chosen serial < 1234:
+        self.conn._request_serial = isc.dns.Serial(1230)
+        self.conn._diff = None           # should be replaced in the AXFR case
+
+    def test_handle_ixfr_begin_soa(self):
+        self.conn._request_type = RRType.IXFR()
+        self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+        self.assertEqual(type(XfrinIXFRDeleteSOA()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_axfr(self):
+        # If the original type is AXFR, other conditions aren't considered,
+        # and AXFR processing will continue
+        self.conn._request_type = RRType.AXFR()
+        self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+        self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_to_axfr(self):
+        # Detecting AXFR-compatible IXFR response by seeing a non SOA RR after
+        # the initial SOA.  Should switch to AXFR.
+        self.assertFalse(self.state.handle_rr(self.conn, self.ns_rrset))
+        self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+        # The Diff for AXFR should be created at this point
+        self.assertNotEqual(None, self.conn._diff)
+
+    def test_handle_ixfr_to_axfr_by_different_soa(self):
+        # An unusual case: Response contains two consecutive SOA but the
+        # serial of the second does not match the requested one.  See
+        # the documentation for XfrinFirstData.handle_rr().
+        self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinAXFR()), type(self.conn.get_xfrstate()))
+        self.assertNotEqual(None, self.conn._diff)
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDeleteSOA(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRDeleteSOA()
+        # In this state a new Diff object is expected to be created.  To
+        # confirm it, we nullify it beforehand.
+        self.conn._diff = None
+
+    def test_handle_rr(self):
+        self.assertTrue(self.state.handle_rr(self.conn, self.begin_soa))
+        self.assertEqual(type(XfrinIXFRDelete()),
+                         type(self.conn.get_xfrstate()))
+        self.assertEqual([('delete', self.begin_soa)],
+                         self.conn._diff.get_buffer())
+
+    def test_handle_non_soa(self):
+        self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRDelete(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        # We need to record the state in 'conn' to check the case where the
+        # state doesn't change.
+        XfrinIXFRDelete().set_xfrstate(self.conn, XfrinIXFRDelete())
+        self.state = self.conn.get_xfrstate()
+
+    def test_handle_delete_rr(self):
+        # Non SOA RRs are simply (going to be) deleted in this state
+        self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+        self.assertEqual([('delete', self.ns_rrset)],
+                         self.conn._diff.get_buffer())
+        # The state shouldn't change
+        self.assertEqual(type(XfrinIXFRDelete()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_soa(self):
+        # SOA in this state means the beginning of added RRs.  This SOA
+        # should also be added in the next state, so handle_rr() should return
+        # false.
+        self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual([], self.conn._diff.get_buffer())
+        self.assertEqual(1234, self.conn._current_serial.get_value())
+        self.assertEqual(type(XfrinIXFRAddSOA()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAddSOA(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRAddSOA()
+
+    def test_handle_rr(self):
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+        self.assertEqual([('add', soa_rrset)],
+                         self.conn._diff.get_buffer())
+
+    def test_handle_non_soa(self):
+        self.assertRaises(XfrinException, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRAdd(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        # We need to record the state in 'conn' to check the case where the
+        # state doesn't change.
+        XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
+        self.conn._current_serial = isc.dns.Serial(1230)
+        self.state = self.conn.get_xfrstate()
+
+    def test_handle_add_rr(self):
+        # Non SOA RRs are simply (going to be) added in this state
+        self.assertTrue(self.state.handle_rr(self.conn, self.ns_rrset))
+        self.assertEqual([('add', self.ns_rrset)],
+                         self.conn._diff.get_buffer())
+        # The state shouldn't change
+        self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
+
+    def test_handle_end_soa(self):
+        self.conn._end_serial = isc.dns.Serial(1234)
+        self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        # handle_rr should have caused commit, and the buffer should now be
+        # empty.
+        self.assertEqual([], self.conn._diff.get_buffer())
+
+    def test_handle_new_delete(self):
+        self.conn._end_serial = isc.dns.Serial(1234)
+        # SOA RR whose serial is the current one means we are going to a new
+        # difference, starting with removing that SOA.
+        self.conn._diff.add_data(self.ns_rrset) # put some dummy change
+        self.assertFalse(self.state.handle_rr(self.conn, self.begin_soa))
+        self.assertEqual([], self.conn._diff.get_buffer())
+        self.assertEqual(type(XfrinIXFRDeleteSOA()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_out_of_sync(self):
+        # getting SOA with an inconsistent serial.  This is an error.
+        self.conn._end_serial = isc.dns.Serial(1235)
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr,
+                          self.conn, soa_rrset)
+
+    def test_finish_message(self):
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinIXFREnd(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFREnd()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertFalse(self.state.finish_message(self.conn))
+
+class TestXfrinIXFRUptodate(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRUptodate()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertRaises(XfrinZoneUptodate, self.state.finish_message,
+                          self.conn)
+
+class TestXfrinAXFR(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinAXFR()
+        self.conn._end_serial = isc.dns.Serial(1234)
+
+    def test_handle_rr(self):
+        """
+        Test we can put data inside.
+        """
+        # Put some data inside
+        self.assertTrue(self.state.handle_rr(self.conn, self.a_rrset))
+        # This test uses internal Diff structure to check the behaviour of
+        # XfrinAXFR. Maybe there could be a cleaner way, but it would be more
+        # complicated.
+        self.assertEqual([('add', self.a_rrset)], self.conn._diff.get_buffer())
+        # This SOA terminates the transfer
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        # It should have changed the state
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        # At this point, the data haven't been committed yet
+        self.assertEqual([('add', self.a_rrset), ('add', soa_rrset)],
+                         self.conn._diff.get_buffer())
+
+    def test_handle_rr_mismatch_soa(self):
+        """ SOA with inconsistent serial - unexpected, but we accept it.
+
+        """
+        self.assertTrue(self.state.handle_rr(self.conn, begin_soa_rrset))
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+
+    def test_finish_message(self):
+        """
+        Check normal end of message.
+        """
+        # When a message ends, nothing happens usually
+        self.assertTrue(self.state.finish_message(self.conn))
+
+class TestXfrinAXFREnd(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinAXFREnd()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.conn._diff.add_data(self.a_rrset)
+        self.conn._diff.add_data(soa_rrset)
+        self.assertFalse(self.state.finish_message(self.conn))
+
+        # The data should have been committed
+        self.assertEqual([], self.conn._diff.get_buffer())
+        check_diffs(self.assertEqual, [[('add', self.a_rrset),
+                                        ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+        self.assertRaises(ValueError, self.conn._diff.commit)
+
 class TestXfrinConnection(unittest.TestCase):
+    '''Convenient parent class for XFR-protocol tests.
+
+    This class provides common setups and helper methods for protocol related
+    tests on AXFR and IXFR.
+
+    '''
+
     def setUp(self):
         if os.path.exists(TEST_DB_FILE):
             os.remove(TEST_DB_FILE)
         self.sock_map = {}
-        self.conn = MockXfrinConnection(self.sock_map, 'example.com.',
-                                        TEST_RRCLASS, TEST_DB_FILE,
-                                        threading.Event(),
+        self.conn = MockXfrinConnection(self.sock_map, TEST_ZONE_NAME,
+                                        TEST_RRCLASS, None, threading.Event(),
                                         TEST_MASTER_IPV4_ADDRINFO)
+        self.conn.init_socket()
         self.soa_response_params = {
             'questions': [example_soa_question],
             'bad_qid': False,
             'response': True,
+            'auth': True,
             'rcode': Rcode.NOERROR(),
+            'answers': default_answers,
+            'authorities': [],
             'tsig': False,
             'axfr_after_soa': self._create_normal_response_data
             }
         self.axfr_response_params = {
+            'question_1st': default_questions,
+            'question_2nd': default_questions,
+            'answer_1st': [soa_rrset, self._create_ns()],
+            'answer_2nd': default_answers,
             'tsig_1st': None,
             'tsig_2nd': None
             }
@@ -201,6 +673,94 @@ class TestXfrinConnection(unittest.TestCase):
         if os.path.exists(TEST_DB_FILE):
             os.remove(TEST_DB_FILE)
 
+    def _create_normal_response_data(self):
+        # This helper method creates a simple sequence of DNS messages that
+        # forms a valid AXFR transaction.  It consists of two messages: the
+        # first one containing SOA, NS, the second containing the trailing SOA.
+        question_1st = self.axfr_response_params['question_1st']
+        question_2nd = self.axfr_response_params['question_2nd']
+        answer_1st = self.axfr_response_params['answer_1st']
+        answer_2nd = self.axfr_response_params['answer_2nd']
+        tsig_1st = self.axfr_response_params['tsig_1st']
+        tsig_2nd = self.axfr_response_params['tsig_2nd']
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=question_1st, answers=answer_1st,
+            tsig_ctx=tsig_1st)
+        self.conn.reply_data += \
+            self.conn.create_response_data(questions=question_2nd,
+                                           answers=answer_2nd,
+                                           tsig_ctx=tsig_2nd)
+
+    def _create_soa_response_data(self):
+        # This helper method creates a DNS message that is supposed to be
+        # used as a valid response to SOA queries prior to XFR.
+        # If tsig is True, it tries to verify the query with a locally
+        # created TSIG context (which may or may not succeed) so that the
+        # response will include a TSIG.
+        # If axfr_after_soa is True, it resets the response_generator so that
+        # a valid XFR messages will follow.
+
+        verify_ctx = None
+        if self.soa_response_params['tsig']:
+            # xfrin (currently) always uses TCP.  strip off the length field.
+            query_data = self.conn.query_data[2:]
+            query_message = Message(Message.PARSE)
+            query_message.from_wire(query_data)
+            verify_ctx = TSIGContext(TSIG_KEY)
+            verify_ctx.verify(query_message.get_tsig_record(), query_data)
+
+        self.conn.reply_data = self.conn.create_response_data(
+            bad_qid=self.soa_response_params['bad_qid'],
+            response=self.soa_response_params['response'],
+            auth=self.soa_response_params['auth'],
+            rcode=self.soa_response_params['rcode'],
+            questions=self.soa_response_params['questions'],
+            answers=self.soa_response_params['answers'],
+            authorities=self.soa_response_params['authorities'],
+            tsig_ctx=verify_ctx)
+        if self.soa_response_params['axfr_after_soa'] != None:
+            self.conn.response_generator = \
+                self.soa_response_params['axfr_after_soa']
+
+    def _create_broken_response_data(self):
+        # This helper method creates a bogus "DNS message" that only contains
+        # 4 octets of data.  The DNS message parser will raise an exception.
+        bogus_data = b'xxxx'
+        self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
+        self.conn.reply_data += bogus_data
+
+    def _create_a(self, address):
+        rrset = RRset(Name('a.example.com'), TEST_RRCLASS, RRType.A(),
+                      RRTTL(3600))
+        rrset.add_rdata(Rdata(RRType.A(), TEST_RRCLASS, address))
+        return rrset
+
+    def _create_soa(self, serial):
+        rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.SOA(),
+                      RRTTL(3600))
+        rdata_str = 'm. r. ' + serial + ' 3600 1800 2419200 7200'
+        rrset.add_rdata(Rdata(RRType.SOA(), TEST_RRCLASS, rdata_str))
+        return rrset
+
+    def _create_ns(self, nsname='ns.'+TEST_ZONE_NAME_STR):
+        rrset = RRset(TEST_ZONE_NAME, TEST_RRCLASS, RRType.NS(), RRTTL(3600))
+        rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
+        return rrset
+
+    def _set_test_zone(self, zone_name):
+        '''Set the zone name for transfer to the specified one.
+
+        It also makes sure that the SOA RR (if it exists) is correctly (re)set.
+
+        '''
+        self.conn._zone_name = zone_name
+        self.conn._zone_soa = self.conn._get_zone_soa()
+
+class TestAXFR(TestXfrinConnection):
+    def setUp(self):
+        super().setUp()
+        XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
+
     def __create_mock_tsig(self, key, error):
         # This helper function creates a MockTSIGContext for a given key
         # and TSIG error to be used as a result of verify (normally faked
@@ -236,31 +796,84 @@ class TestXfrinConnection(unittest.TestCase):
         # to confirm an AF_INET6 socket has been created.  A naive application
         # tends to assume it's IPv4 only and hardcode AF_INET.  This test
         # uncovers such a bug.
-        c = MockXfrinConnection({}, 'example.com.', TEST_RRCLASS, TEST_DB_FILE,
-                                threading.Event(),
-                                TEST_MASTER_IPV6_ADDRINFO)
+        c = MockXfrinConnection({}, TEST_ZONE_NAME, TEST_RRCLASS, None,
+                                threading.Event(), TEST_MASTER_IPV6_ADDRINFO)
+        c.init_socket()
         c.bind(('::', 0))
         c.close()
 
     def test_init_chclass(self):
-        c = XfrinConnection({}, 'example.com.', RRClass.CH(), TEST_DB_FILE,
-                            threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+        c = MockXfrinConnection({}, TEST_ZONE_NAME, RRClass.CH(), None,
+                                threading.Event(), TEST_MASTER_IPV4_ADDRINFO)
+        c.init_socket()
         axfrmsg = c._create_query(RRType.AXFR())
         self.assertEqual(axfrmsg.get_question()[0].get_class(),
                          RRClass.CH())
         c.close()
 
-    def test_send_query(self):
-        def create_msg(query_type):
-            msg = Message(Message.RENDER)
-            query_id = 0x1035
-            msg.set_qid(query_id)
-            msg.set_opcode(Opcode.QUERY())
-            msg.set_rcode(Rcode.NOERROR())
-            query_question = Question(Name("example.com."), RRClass.IN(), query_type)
-            msg.add_question(query_question)
-            return msg
+    def test_create_query(self):
+        def check_query(expected_qtype, expected_auth):
+            '''Helper method to repeat the same pattern of tests'''
+            self.assertEqual(Opcode.QUERY(), msg.get_opcode())
+            self.assertEqual(Rcode.NOERROR(), msg.get_rcode())
+            self.assertEqual(1, msg.get_rr_count(Message.SECTION_QUESTION))
+            self.assertEqual(TEST_ZONE_NAME, msg.get_question()[0].get_name())
+            self.assertEqual(expected_qtype, msg.get_question()[0].get_type())
+            self.assertEqual(0, msg.get_rr_count(Message.SECTION_ANSWER))
+            self.assertEqual(0, msg.get_rr_count(Message.SECTION_ADDITIONAL))
+            if expected_auth is None:
+                self.assertEqual(0,
+                                 msg.get_rr_count(Message.SECTION_AUTHORITY))
+            else:
+                self.assertEqual(1,
+                                 msg.get_rr_count(Message.SECTION_AUTHORITY))
+                auth_rr = msg.get_section(Message.SECTION_AUTHORITY)[0]
+                self.assertEqual(expected_auth.get_name(), auth_rr.get_name())
+                self.assertEqual(expected_auth.get_type(), auth_rr.get_type())
+                self.assertEqual(expected_auth.get_class(),
+                                 auth_rr.get_class())
+                # In our test scenario RDATA must be 1
+                self.assertEqual(1, expected_auth.get_rdata_count())
+                self.assertEqual(1, auth_rr.get_rdata_count())
+                self.assertEqual(expected_auth.get_rdata()[0],
+                                 auth_rr.get_rdata()[0])
+
+        # Actual tests start here
+        # SOA query
+        msg = self.conn._create_query(RRType.SOA())
+        check_query(RRType.SOA(), None)
+
+        # AXFR query
+        msg = self.conn._create_query(RRType.AXFR())
+        check_query(RRType.AXFR(), None)
+
+        # IXFR query
+        msg = self.conn._create_query(RRType.IXFR())
+        check_query(RRType.IXFR(), begin_soa_rrset)
+        self.assertEqual(1230, self.conn._request_serial.get_value())
+
+    def test_create_ixfr_query_fail(self):
+        # In these cases _create_query() will fail to find a valid SOA RR to
+        # insert in the IXFR query, and should raise an exception.
+
+        self._set_test_zone(Name('no-such-zone.example'))
+        self.assertRaises(XfrinException, self.conn._create_query,
+                          RRType.IXFR())
+
+        self._set_test_zone(Name('partial-match-zone.example'))
+        self.assertRaises(XfrinException, self.conn._create_query,
+                          RRType.IXFR())
+
+        self._set_test_zone(Name('no-soa.example'))
+        self.assertRaises(XfrinException, self.conn._create_query,
+                          RRType.IXFR())
+
+        self._set_test_zone(Name('dup-soa.example'))
+        self.conn._zone_soa = self.conn._get_zone_soa()
+        self.assertRaises(XfrinException, self.conn._create_query,
+                          RRType.IXFR())
 
+    def test_send_query(self):
         def message_has_tsig(data):
             # a simple check if the actual data contains a TSIG RR.
             # At our level this simple check should suffice; other detailed
@@ -269,14 +882,6 @@ class TestXfrinConnection(unittest.TestCase):
             msg.from_wire(data)
             return msg.get_tsig_record() is not None
 
-        self.conn._create_query = create_msg
-        # soa request
-        self.conn._send_query(RRType.SOA())
-        self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\x06\x00\x01')
-        # axfr request
-        self.conn._send_query(RRType.AXFR())
-        self.assertEqual(self.conn.query_data, b'\x00\x1d\x105\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
-
         # soa request with tsig
         self.conn._tsig_key = TSIG_KEY
         self.conn._send_query(RRType.SOA())
@@ -288,24 +893,31 @@ class TestXfrinConnection(unittest.TestCase):
 
     def test_response_with_invalid_msg(self):
         self.conn.reply_data = b'aaaxxxx'
-        self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+        self.assertRaises(XfrinTestException,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_with_tsigfail(self):
         self.conn._tsig_key = TSIG_KEY
         # server tsig check fail, return with RCODE 9 (NOTAUTH)
         self.conn._send_query(RRType.SOA())
-        self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.conn.reply_data = \
+            self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_without_end_soa(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data()
-        self.assertRaises(XfrinTestException, self._handle_xfrin_response)
+        # This should result in timeout in the asyncore loop.  We emulate
+        # that situation in recv() by emptying the reply data buffer.
+        self.assertRaises(XfrinTestTimeoutException,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_bad_qid(self):
         self.conn._send_query(RRType.AXFR())
-        self.conn.reply_data = self.conn.create_response_data(bad_qid = True)
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_error_code_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -316,9 +928,9 @@ class TestXfrinConnection(unittest.TestCase):
                 rcode=Rcode.SERVFAIL())
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
-                               self._handle_xfrin_response)
+                               self.conn._handle_xfrin_responses)
 
     def test_response_bad_qid_bad_key(self):
         self.conn._tsig_key = TSIG_KEY
@@ -328,38 +940,34 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADKEY",
-                               self._handle_xfrin_response)
+                               self.conn._handle_xfrin_responses)
 
     def test_response_non_response(self):
         self.conn._send_query(RRType.AXFR())
-        self.conn.reply_data = self.conn.create_response_data(response = False)
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.conn.reply_data = self.conn.create_response_data(response=False)
+        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
 
     def test_response_error_code(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             rcode=Rcode.SERVFAIL())
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_multi_question(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             questions=[example_axfr_question, example_axfr_question])
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
-
-    def test_response_empty_answer(self):
-        self.conn._send_query(RRType.AXFR())
-        self.conn.reply_data = self.conn.create_response_data(answers=[])
-        # Should an empty answer trigger an exception?  Even though it's very
-        # unusual it's not necessarily invalid.  Need to revisit.
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_non_response(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(response = False)
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_soacheck(self):
         # we need to defer the creation until we know the QID, which is
@@ -374,7 +982,7 @@ class TestXfrinConnection(unittest.TestCase):
     def test_soacheck_badqid(self):
         self.soa_response_params['bad_qid'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_bad_qid_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -384,19 +992,123 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.response_generator = self._create_soa_response_data
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._check_soa_serial)
 
     def test_soacheck_non_response(self):
         self.soa_response_params['response'] = False
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_error_code(self):
         self.soa_response_params['rcode'] = Rcode.SERVFAIL()
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_notauth(self):
+        self.soa_response_params['auth'] = False
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate(self):
+        # Primary's SOA serial is identical to the local serial
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate2(self):
+        # Primary's SOA serial is "smaller" than the local serial
+        self.soa_response_params['answers'] = [create_soa(1229)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate3(self):
+        # Similar to the previous case, but checking the comparison is based
+        # on the serial number arithmetic.
+        self.soa_response_params['answers'] = [create_soa(0xffffffff)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_newzone(self):
+        # Primary's SOA is 'old', but this secondary doesn't know anything
+        # about the zone yet, so it should accept it.
+        def response_generator():
+            # _request_serial is set in _check_soa_serial().  Reset it here.
+            self.conn._request_serial = None
+            self._create_soa_response_data()
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = response_generator
+        self.assertEqual(XFRIN_OK, self.conn._check_soa_serial())
+
+    def test_soacheck_question_empty(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(Name('example.org'),
+                                                          TEST_RRCLASS,
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          RRClass.CH(),
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_type_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          TEST_RRCLASS,
+                                                          RRType.AAAA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_no_soa(self):
+        # The response just doesn't contain SOA without any other indication
+        # of errors.
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [create_soa(1234,
+                                                          Name('example.org'))]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+        self.soa_response_params['answers'] = [soa]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_multiple_soa(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [soa_rrset, soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_cname_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        # Add SOA to answer, too, to make sure that it deceives the parser
+        self.soa_response_params['answers'] = [soa_rrset, create_cname()]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_referral_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_nodata_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig(self):
         # Use a mock tsig context emulating a validly signed response
@@ -415,7 +1127,7 @@ class TestXfrinConnection(unittest.TestCase):
         self.soa_response_params['rcode'] = Rcode.NOTAUTH()
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_noerror_badsig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -428,7 +1140,7 @@ class TestXfrinConnection(unittest.TestCase):
         # treat this as a final failure (just as BIND 9 does).
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_unsigned_response(self):
         # we can use a real TSIGContext for this.  the response doesn't
@@ -437,47 +1149,173 @@ class TestXfrinConnection(unittest.TestCase):
         # it as a fatal transaction failure, too.
         self.conn._tsig_key = TSIG_KEY
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_unexpected_tsig_response(self):
         # we reject unexpected TSIG in responses (following BIND 9's
         # behavior)
         self.soa_response_params['tsig'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_response_shutdown(self):
         self.conn.response_generator = self._create_normal_response_data
         self.conn._shutdown_event.set()
         self.conn._send_query(RRType.AXFR())
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
 
     def test_response_timeout(self):
         self.conn.response_generator = self._create_normal_response_data
         self.conn.force_time_out = True
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
 
     def test_response_remote_close(self):
         self.conn.response_generator = self._create_normal_response_data
         self.conn.force_close = True
-        self.assertRaises(XfrinException, self._handle_xfrin_response)
+        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
 
     def test_response_bad_message(self):
         self.conn.response_generator = self._create_broken_response_data
         self.conn._send_query(RRType.AXFR())
-        self.assertRaises(Exception, self._handle_xfrin_response)
+        self.assertRaises(Exception, self.conn._handle_xfrin_responses)
 
-    def test_response(self):
-        # normal case.
+    def test_axfr_response(self):
+        # A simple normal case: AXFR consists of SOA, NS, then trailing SOA.
+        self.conn.response_generator = self._create_normal_response_data
+        self.conn._send_query(RRType.AXFR())
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_response_empty_answer(self):
+        '''Test with an empty AXFR answer section.
+
+        This is an unusual response, but there is no reason to reject it.
+        The second message is a complete AXFR response, and transfer should
+        succeed just like the normal case.
+
+        '''
+
+        self.axfr_response_params['answer_1st'] = []
+        self.axfr_response_params['answer_2nd'] = [soa_rrset,
+                                                   self._create_ns(),
+                                                   soa_rrset]
+        self.conn.response_generator = self._create_normal_response_data
+        self.conn._send_query(RRType.AXFR())
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_soa_mismatch(self):
+        '''AXFR response whose begin/end SOAs are not same.
+
+        What we should do here is moot; for now we accept it, as does BIND 9.
+
+        '''
+        ns_rr = self._create_ns()
+        a_rr = self._create_a('192.0.2.1')
+        self.conn._send_query(RRType.AXFR())
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                RRType.AXFR())],
+            # begin serial=1230, end serial=1234. end will be used.
+            answers=[begin_soa_rrset, ns_rr, a_rr, soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_extra(self):
+        '''Test with an extra RR after the end of AXFR session.
+
+        The session should be rejected, and nothing should be committed.
+
+        '''
+        ns_rr = self._create_ns()
+        a_rr = self._create_a('192.0.2.1')
+        self.conn._send_query(RRType.AXFR())
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                RRType.AXFR())],
+            answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_qname_mismatch(self):
+        '''AXFR response with a mismatch question name.
+
+        Our implementation accepts that, so does BIND 9.
+
+        '''
+        self.axfr_response_params['question_1st'] = \
+            [Question(Name('mismatch.example'), TEST_RRCLASS, RRType.AXFR())]
         self.conn.response_generator = self._create_normal_response_data
         self.conn._send_query(RRType.AXFR())
-        # two SOAs, and only these have been transfered.  the 2nd SOA is just
-        # a marker, so only 1 RR has been provided in the iteration.
-        self.assertEqual(self._handle_xfrin_response(), 1)
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_qclass_mismatch(self):
+        '''AXFR response with a mismatch RR class.
+
+        Our implementation accepts that, so does BIND 9.
+
+        '''
+        self.axfr_response_params['question_1st'] = \
+            [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.AXFR())]
+        self.conn.response_generator = self._create_normal_response_data
+        self.conn._send_query(RRType.AXFR())
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_qtype_mismatch(self):
+        '''AXFR response with a mismatch RR type.
+
+        Our implementation accepts that, so does BIND 9.
+
+        '''
+        # returning IXFR in question to AXFR query
+        self.axfr_response_params['question_1st'] = \
+            [Question(TEST_ZONE_NAME, RRClass.CH(), RRType.IXFR())]
+        self.conn.response_generator = self._create_normal_response_data
+        self.conn._send_query(RRType.AXFR())
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_axfr_response_empty_question(self):
+        '''AXFR response with an empty question.
+
+        Our implementation accepts that, so does BIND 9.
+
+        '''
+        self.axfr_response_params['question_1st'] = []
+        self.conn.response_generator = self._create_normal_response_data
+        self.conn._send_query(RRType.AXFR())
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
 
     def test_do_xfrin(self):
         self.conn.response_generator = self._create_normal_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
 
     def test_do_xfrin_with_tsig(self):
         # use TSIG with a mock context.  we fake all verify results to
@@ -487,9 +1325,10 @@ class TestXfrinConnection(unittest.TestCase):
             lambda key: self.__create_mock_tsig(key, TSIGError.NOERROR)
         self.conn.response_generator = self._create_normal_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
-        # We use two messages in the tests.  The same context should have been
-        # usef for both.
-        self.assertEqual(2, self.conn._tsig_ctx.verify_called)
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('add', self._create_ns()), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
 
     def test_do_xfrin_with_tsig_fail(self):
         # TSIG verify will fail for the first message.  xfrin should fail
@@ -569,16 +1408,28 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.response_generator = self._create_broken_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
 
-    def test_do_xfrin_dberror(self):
-        # DB file is under a non existent directory, so its creation will fail,
-        # which will make the transfer fail.
-        self.conn._db_file = "not_existent/" + TEST_DB_FILE
+    def test_do_xfrin_datasrc_error(self):
+        # Emulate failure in the data source client on commit.
+        self.conn._datasrc_client.force_fail = True
+        self.conn.response_generator = self._create_normal_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_FAIL)
 
     def test_do_soacheck_and_xfrin(self):
         self.conn.response_generator = self._create_soa_response_data
         self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
 
+    def test_do_soacheck_uptodate(self):
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+
+    def test_do_soacheck_protocol_error(self):
+        # There are several cases, but at this level it's sufficient to check
+        # only one.  We use the case where there's no SOA in the response.
+        self.soa_response_params['answers'] = []
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
+
     def test_do_soacheck_and_xfrin_with_tsig(self):
         # We are going to have a SOA query/response transaction, followed by
         # AXFR, all TSIG signed.  xfrin should use a new TSIG context for
@@ -598,10 +1449,7 @@ class TestXfrinConnection(unittest.TestCase):
 
     def test_do_soacheck_broken_response(self):
         self.conn.response_generator = self._create_broken_response_data
-        # XXX: TODO: this test failed here, should xfr not raise an
-        # exception but simply drop and return FAIL?
-        #self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
-        self.assertRaises(MessageTooShort, self.conn.do_xfrin, True)
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
 
     def test_do_soacheck_badqid(self):
         # the QID mismatch would internally trigger a XfrinException exception,
@@ -610,59 +1458,448 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.response_generator = self._create_soa_response_data
         self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
 
-    def _handle_xfrin_response(self):
-        # This helper methods iterates over all RRs (excluding the ending SOA)
-        # transferred, and simply returns the number of RRs.  The return value
-        # may be used an assertion value for test cases.
-        rrs = 0
-        for rr in self.conn._handle_xfrin_response():
-            rrs += 1
-        return rrs
+class TestIXFRResponse(TestXfrinConnection):
+    def setUp(self):
+        super().setUp()
+        self.conn._query_id = self.conn.qid = 1035
+        self.conn._request_serial = isc.dns.Serial(1230)
+        self.conn._request_type = RRType.IXFR()
+        self.conn._datasrc_client = MockDataSourceClient()
+        XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
 
-    def _create_normal_response_data(self):
-        # This helper method creates a simple sequence of DNS messages that
-        # forms a valid XFR transaction.  It consists of two messages, each
-        # containing just a single SOA RR.
-        tsig_1st = self.axfr_response_params['tsig_1st']
-        tsig_2nd = self.axfr_response_params['tsig_2nd']
-        self.conn.reply_data = self.conn.create_response_data(tsig_ctx=tsig_1st)
-        self.conn.reply_data += \
-            self.conn.create_response_data(tsig_ctx=tsig_2nd)
+    def test_ixfr_response(self):
+        '''A simplest form of IXFR response.
 
-    def _create_soa_response_data(self):
-        # This helper method creates a DNS message that is supposed to be
-        # used a valid response to SOA queries prior to XFR.
-        # If tsig is True, it tries to verify the query with a locally
-        # created TSIG context (which may or may not succeed) so that the
-        # response will include a TSIG.
-        # If axfr_after_soa is True, it resets the response_generator so that
-        # a valid XFR messages will follow.
-
-        verify_ctx = None
-        if self.soa_response_params['tsig']:
-            # xfrin (curreently) always uses TCP.  strip off the length field.
-            query_data = self.conn.query_data[2:]
-            query_message = Message(Message.PARSE)
-            query_message.from_wire(query_data)
-            verify_ctx = TSIGContext(TSIG_KEY)
-            verify_ctx.verify(query_message.get_tsig_record(), query_data)
+        It simply updates the zone's SOA one time.
 
+        '''
         self.conn.reply_data = self.conn.create_response_data(
-            bad_qid=self.soa_response_params['bad_qid'],
-            response=self.soa_response_params['response'],
-            rcode=self.soa_response_params['rcode'],
-            questions=self.soa_response_params['questions'],
-            tsig_ctx=verify_ctx)
-        if self.soa_response_params['axfr_after_soa'] != None:
-            self.conn.response_generator = \
-                self.soa_response_params['axfr_after_soa']
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertTrue(self.conn._datasrc_client._journaling_enabled)
+        self.assertEqual([], self.conn._datasrc_client.diffs)
+        check_diffs(self.assertEqual,
+                    [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_multi_sequences(self):
+        '''Similar to the previous case, but with multiple diff seqs.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset,
+                     # removing one A in serial 1230
+                     begin_soa_rrset, self._create_a('192.0.2.1'),
+                     # adding one A in serial 1231
+                     self._create_soa('1231'), self._create_a('192.0.2.2'),
+                     # removing one A in serial 1231
+                     self._create_soa('1231'), self._create_a('192.0.2.3'),
+                     # adding one A in serial 1232
+                     self._create_soa('1232'), self._create_a('192.0.2.4'),
+                     # removing one A in serial 1232
+                     self._create_soa('1232'), self._create_a('192.0.2.5'),
+                     # adding one A in serial 1234
+                     soa_rrset, self._create_a('192.0.2.6'),
+                     soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertEqual([], self.conn._datasrc_client.diffs)
+        check_diffs(self.assertEqual,
+                    [[('delete', begin_soa_rrset),
+                      ('delete', self._create_a('192.0.2.1')),
+                      ('add', self._create_soa('1231')),
+                      ('add', self._create_a('192.0.2.2'))],
+                     [('delete', self._create_soa('1231')),
+                      ('delete', self._create_a('192.0.2.3')),
+                      ('add', self._create_soa('1232')),
+                      ('add', self._create_a('192.0.2.4'))],
+                     [('delete', self._create_soa('1232')),
+                      ('delete', self._create_a('192.0.2.5')),
+                      ('add', soa_rrset),
+                      ('add', self._create_a('192.0.2.6'))]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_multi_messages(self):
+        '''Similar to the first case, but RRs span over multiple messages.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, begin_soa_rrset, soa_rrset])
+        self.conn.reply_data += self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_uptodate(self):
+        '''IXFR response indicates the zone is new enough'''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset])
+        self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_broken(self):
+        '''Test with a broken response.
+
+        '''
+        # SOA sequence is out-of-sync
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+                     self._create_soa('1235')])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_extra(self):
+        '''Test with an extra RR after the end of IXFR diff sequences.
+
+        IXFR should be rejected, but complete diff sequences should be
+    committed; it's not clear whether it's compliant with the protocol
+        specification, but it is how BIND 9 works and we do the same.
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset,
+                     self._create_a('192.0.2.1')])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        check_diffs(self.assertEqual,
+                    [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_response_uptodate_extra(self):
+        '''Similar to 'uptodate' test, but with extra bogus data.
+
+        In either case an exception will be raised, but in this case it's
+        considered an error.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset, soa_rrset])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_to_axfr_response(self):
+        '''AXFR-style IXFR response.
+
+        It simply updates the zone's SOA one time.
+
+        '''
+        ns_rr = self._create_ns()
+        a_rr = self._create_a('192.0.2.1')
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        # In the case of AXFR-style IXFR, journaling must have been disabled.
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
+        self.assertEqual([], self.conn._datasrc_client.diffs)
+        # The SOA should be added exactly once, and in our implementation
+        # it should be added at the end of the sequence.
+        check_diffs(self.assertEqual,
+                    [[('add', ns_rr), ('add', a_rr), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_to_axfr_response_mismatch_soa(self):
+        '''AXFR-style IXFR response, but the two SOA are not the same.
+
+        In the current implementation, we accept it and use the second SOA.
+
+        '''
+        ns_rr = self._create_ns()
+        a_rr = self._create_a('192.0.2.1')
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, ns_rr, a_rr, begin_soa_rrset])
+        self.conn._handle_xfrin_responses()
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertEqual([], self.conn._datasrc_client.diffs)
+        check_diffs(self.assertEqual,
+                    [[('add', ns_rr), ('add', a_rr),
+                      ('add', begin_soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+    def test_ixfr_to_axfr_response_extra(self):
+        '''Test with an extra RR after the end of AXFR-style IXFR session.
+
+        The session should be rejected, and nothing should be committed.
+
+        '''
+        ns_rr = self._create_ns()
+        a_rr = self._create_a('192.0.2.1')
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[soa_rrset, ns_rr, a_rr, soa_rrset, a_rr])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertEqual([], self.conn._datasrc_client.committed_diffs)
+
+class TestIXFRSession(TestXfrinConnection):
+    '''Tests for a full IXFR session (query and response).
+
+    Detailed corner cases should have been covered in test_create_query()
+    and TestIXFRResponse, so we'll only check some typical cases to confirm
+    the general logic flow.
+    '''
+    def setUp(self):
+        super().setUp()
 
-    def _create_broken_response_data(self):
-        # This helper method creates a bogus "DNS message" that only contains
-        # 4 octets of data.  The DNS message parser will raise an exception.
-        bogus_data = b'xxxx'
-        self.conn.reply_data = struct.pack('H', socket.htons(len(bogus_data)))
-        self.conn.reply_data += bogus_data
+    def test_do_xfrin(self):
+        def create_ixfr_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+        self.conn.response_generator = create_ixfr_response
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
+        # Check some details of the IXFR protocol processing
+        self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        check_diffs(self.assertEqual,
+                    [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
+                    self.conn._datasrc_client.committed_diffs)
+
+        # Check if the query was IXFR.
+        qdata = self.conn.query_data[2:]
+        qmsg = Message(Message.PARSE)
+        qmsg.from_wire(qdata, len(qdata))
+        self.assertEqual(1, qmsg.get_rr_count(Message.SECTION_QUESTION))
+        self.assertEqual(TEST_ZONE_NAME, qmsg.get_question()[0].get_name())
+        self.assertEqual(RRType.IXFR(), qmsg.get_question()[0].get_type())
+
+    def test_do_xfrin_fail(self):
+        '''IXFR fails due to a protocol error.
+
+        '''
+        def create_ixfr_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+                         self._create_soa('1235')])
+        self.conn.response_generator = create_ixfr_response
+        self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+    def test_do_xfrin_fail2(self):
+        '''IXFR fails due to a bogus DNS message.
+
+        '''
+        self._create_broken_response_data()
+        self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+
+    def test_do_xfrin_uptodate(self):
+        '''IXFR is (gracefully) aborted because serial is not new
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[begin_soa_rrset])
+        self.conn.response_generator = create_response
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
+class TestXFRSessionWithSQLite3(TestXfrinConnection):
+    '''Tests for XFR sessions using an SQLite3 DB.
+
+    These are provided mainly to confirm the implementation actually works
+    in an environment closer to actual operational environments.  So we
+    only check a few common cases; other details are tested using mock
+    data sources.
+
+    '''
+    def setUp(self):
+        self.sqlite3db_src = TESTDATA_SRCDIR + '/example.com.sqlite3'
+        self.sqlite3db_obj = TESTDATA_OBJDIR + '/example.com.sqlite3.copy'
+        self.empty_sqlite3db_obj = TESTDATA_OBJDIR + '/empty.sqlite3'
+        self.sqlite3db_cfg = "{ \"database_file\": \"" +\
+                             self.sqlite3db_obj + "\"}"
+        super().setUp()
+        if os.path.exists(self.sqlite3db_obj):
+            os.unlink(self.sqlite3db_obj)
+        if os.path.exists(self.empty_sqlite3db_obj):
+            os.unlink(self.empty_sqlite3db_obj)
+        shutil.copyfile(self.sqlite3db_src, self.sqlite3db_obj)
+        self.conn._datasrc_client = DataSourceClient("sqlite3",
+                                                     self.sqlite3db_cfg)
+
+    def tearDown(self):
+        if os.path.exists(self.sqlite3db_obj):
+            os.unlink(self.sqlite3db_obj)
+        if os.path.exists(self.empty_sqlite3db_obj):
+            os.unlink(self.empty_sqlite3db_obj)
+
+    def get_zone_serial(self):
+        result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+        self.assertEqual(DataSourceClient.SUCCESS, result)
+        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA())
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual(1, soa.get_rdata_count())
+        return get_soa_serial(soa.get_rdata()[0])
+
+    def record_exist(self, name, type):
+        result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
+        self.assertEqual(DataSourceClient.SUCCESS, result)
+        result, soa = finder.find(name, type)
+        return result == ZoneFinder.SUCCESS
+
+    def test_do_ixfrin_sqlite3(self):
+        def create_ixfr_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
+        self.conn.response_generator = create_ixfr_response
+
+        # Confirm xfrin succeeds and SOA is updated
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+        self.assertEqual(1234, self.get_zone_serial().get_value())
+
+        # Also confirm the corresponding diffs are stored in the diffs table
+        conn = sqlite3.connect(self.sqlite3db_obj)
+        cur = conn.cursor()
+        cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+        soa_rdata_base = 'master.example.com. admin.example.com. ' + \
+            'SERIAL 3600 1800 2419200 7200'
+        self.assertEqual(cur.fetchall(),
+                         [(TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1230), soa_rdata_base)),
+                          (TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1234), soa_rdata_base))])
+        conn.close()
+
+    def test_do_ixfrin_sqlite3_fail(self):
+        '''Similar to the previous test, but xfrin fails due to error.
+
+        Check the DB is not changed.
+
+        '''
+        def create_ixfr_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[soa_rrset, begin_soa_rrset, soa_rrset,
+                         self._create_soa('1235')])
+        self.conn.response_generator = create_ixfr_response
+
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+        self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+
+    def test_do_ixfrin_nozone_sqlite3(self):
+        self._set_test_zone(Name('nosuchzone.example'))
+        self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
+        # This should fail even before starting state transition
+        self.assertEqual(None, self.conn.get_xfrstate())
+
+    def axfr_check(self, type):
+        '''Common checks for AXFR and AXFR-style IXFR
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+                answers=[soa_rrset, self._create_ns(), soa_rrset])
+        self.conn.response_generator = create_response
+
+        # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+        self.assertTrue(self.record_exist(Name('dns01.example.com'),
+                                          RRType.A()))
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
+        self.assertEqual(1234, self.get_zone_serial().get_value())
+        self.assertFalse(self.record_exist(Name('dns01.example.com'),
+                                           RRType.A()))
+
+    def test_do_ixfrin_axfr_sqlite3(self):
+        '''AXFR-style IXFR.
+
+        '''
+        self.axfr_check(RRType.IXFR())
+
+    def test_do_axfrin_sqlite3(self):
+        '''AXFR.
+
+        '''
+        self.axfr_check(RRType.AXFR())
+
+    def axfr_failure_check(self, type):
+        '''Similar to the previous two tests, but xfrin fails due to error.
+
+        Check the DB is not changed.
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, type)],
+                answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
+        self.conn.response_generator = create_response
+
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+        self.assertTrue(self.record_exist(Name('dns01.example.com'),
+                                          RRType.A()))
+        self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
+        self.assertEqual(1230, self.get_zone_serial().get_value())
+        self.assertTrue(self.record_exist(Name('dns01.example.com'),
+                                          RRType.A()))
+
+    def test_do_xfrin_axfr_sqlite3_fail(self):
+        '''Failure case for AXFR-style IXFR.
+
+        '''
+        self.axfr_failure_check(RRType.IXFR())
+
+    def test_do_axfrin_sqlite3_fail(self):
+        '''Failure case for AXFR.
+
+        '''
+        self.axfr_failure_check(RRType.AXFR())
+
+    def test_do_axfrin_nozone_sqlite3(self):
+        '''AXFR test with an empty SQLite3 DB file, thus no target zone there.
+
+        For now, we provide backward compatible behavior: xfrin will create
+    the zone (even setting up the entire schema) in the DB.
+        Note: a future version of this test will make it fail.
+
+        '''
+        self.conn._db_file = self.empty_sqlite3db_obj
+        self.conn._datasrc_client = DataSourceClient(
+            "sqlite3",
+            "{ \"database_file\": \"" + self.empty_sqlite3db_obj + "\"}")
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.AXFR())],
+                answers=[soa_rrset, self._create_ns(), soa_rrset])
+        self.conn.response_generator = create_response
+        self._set_test_zone(Name('example.com'))
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
+        self.assertEqual(type(XfrinAXFREnd()),
+                         type(self.conn.get_xfrstate()))
+        self.assertEqual(1234, self.get_zone_serial().get_value())
+        self.assertFalse(self.record_exist(Name('dns01.example.com'),
+                                           RRType.A()))
 
 class TestXfrinRecorder(unittest.TestCase):
     def setUp(self):
@@ -696,6 +1933,110 @@ class TestXfrinRecorder(unittest.TestCase):
         self.recorder.decrement(TEST_ZONE_NAME)
         self.assertEqual(self.recorder.xfrin_in_progress(TEST_ZONE_NAME), False)
 
+class TestXfrinProcess(unittest.TestCase):
+    def setUp(self):
+        self.unlocked = False
+        self.conn_closed = False
+        self.do_raise_on_close = False
+        self.do_raise_on_connect = False
+        self.do_raise_on_publish = False
+        self.master = (socket.AF_INET, socket.SOCK_STREAM,
+                       (TEST_MASTER_IPV4_ADDRESS, TEST_MASTER_PORT))
+
+    def tearDown(self):
+        # whatever happens the lock acquired in xfrin_recorder.increment
+        # must always be released.  We checked the condition for all test
+        # cases.
+        self.assertTrue(self.unlocked)
+
+        # Same for the connection
+        self.assertTrue(self.conn_closed)
+
+    def increment(self, zone_name):
+        '''Fake method of xfrin_recorder.increment.
+
+        '''
+        self.unlocked = False
+
+    def decrement(self, zone_name):
+        '''Fake method of xfrin_recorder.decrement.
+
+        '''
+        self.unlocked = True
+
+    def publish_xfrin_news(self, zone_name, rrclass, ret):
+        '''Fake method of serve.publish_xfrin_news
+
+        '''
+        if self.do_raise_on_publish:
+            raise XfrinTestException('Emulated exception in publish')
+
+    def connect_to_master(self, conn):
+        self.sock_fd = conn.fileno()
+        if self.do_raise_on_connect:
+            raise XfrinTestException('Emulated exception in connect')
+        return True
+
+    def conn_close(self, conn):
+        self.conn_closed = True
+        XfrinConnection.close(conn)
+        if self.do_raise_on_close:
+            raise XfrinTestException('Emulated exception in connect')
+
+    def create_xfrinconn(self, sock_map, zone_name, rrclass, datasrc_client,
+                         shutdown_event, master_addrinfo, tsig_key):
+        conn = MockXfrinConnection(sock_map, zone_name, rrclass,
+                                   datasrc_client, shutdown_event,
+                                   master_addrinfo, tsig_key)
+
+        # An awkward check that would specifically identify an old bug
+        # where initialization of XfrinConnection._tsig_ctx_creator caused
+        # self reference and subsequently led to reference leak.
+        orig_ref = sys.getrefcount(conn)
+        conn._tsig_ctx_creator = None
+        self.assertEqual(orig_ref, sys.getrefcount(conn))
+
+        # Replace some methods for connect with our internal ones for the
+        # convenience of tests
+        conn.connect_to_master = lambda : self.connect_to_master(conn)
+        conn.do_xfrin = lambda x, y : XFRIN_OK
+        conn.close = lambda : self.conn_close(conn)
+
+        return conn
+
+    def test_process_xfrin_normal(self):
+        # Normal, successful case.  We only check that things are cleaned up
+        # at the tearDown time.
+        process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+                      self.master,  False, None, RRType.AXFR(),
+                      self.create_xfrinconn)
+
+    def test_process_xfrin_exception_on_connect(self):
+        # connect_to_master() will raise an exception.  Things must still be
+        # cleaned up.
+        self.do_raise_on_connect = True
+        process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+                      self.master,  False, None, RRType.AXFR(),
+                      self.create_xfrinconn)
+
+    def test_process_xfrin_exception_on_close(self):
+        # connect() will result in exception, and even the cleanup close()
+        # will fail with an exception.  This should be quite likely a bug,
+        # but we deal with that case.
+        self.do_raise_on_connect = True
+        self.do_raise_on_close = True
+        process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+                      self.master,  False, None, RRType.AXFR(),
+                      self.create_xfrinconn)
+
+    def test_process_xfrin_exception_on_publish(self):
+        # xfr succeeds but notifying the zonemgr fails with exception.
+        # everything must still be cleaned up.
+        self.do_raise_on_publish = True
+        process_xfrin(self, self, TEST_ZONE_NAME, TEST_RRCLASS, None, None,
+                      self.master,  False, None, RRType.AXFR(),
+                      self.create_xfrinconn)
+
 class TestXfrin(unittest.TestCase):
     def setUp(self):
         # redirect output
@@ -789,6 +2130,8 @@ class TestXfrin(unittest.TestCase):
                                                   self.args)['result'][0], 0)
         self.assertEqual(self.args['master'], self.xfr.xfrin_started_master_addr)
         self.assertEqual(int(self.args['port']), self.xfr.xfrin_started_master_port)
+        # By default we use AXFR (for now)
+        self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
 
     def test_command_handler_retransfer_short_command1(self):
         # try it when only specifying the zone name (of unknown zone)
@@ -901,6 +2244,8 @@ class TestXfrin(unittest.TestCase):
                          self.xfr.xfrin_started_master_addr)
         self.assertEqual(int(TEST_MASTER_PORT),
                          self.xfr.xfrin_started_master_port)
+        # By default we use AXFR (for now)
+        self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
 
     def test_command_handler_notify(self):
         # at this level, refresh is no different than retransfer.
@@ -909,7 +2254,7 @@ class TestXfrin(unittest.TestCase):
         self.assertEqual(self.xfr.command_handler("notify",
                                                   self.args)['result'][0], 1)
 
-    def test_command_handler_notify_known_zone(self):
+    def test_command_handler_notify_known_zone_bad_addr(self):
         # try it with a known zone
         self.args['master'] = TEST_MASTER_IPV6_ADDRESS
 
@@ -921,18 +2266,37 @@ class TestXfrin(unittest.TestCase):
                   }
                 ]}
         self.xfr.config_handler(zones)
+        # the command should now fail
         self.assertEqual(self.xfr.command_handler("notify",
-                                                  self.args)['result'][0], 0)
+                                                  self.args)['result'][0], 1)
 
-        # and see if we used the address from the command, and not from
-        # the config
-        # This is actually NOT the address given in the command, which
-        # would at this point not make sense, see the TODO in
-        # xfrin.py.in Xfrin.command_handler())
-        self.assertEqual(TEST_MASTER_IPV4_ADDRESS,
-                         self.xfr.xfrin_started_master_addr)
-        self.assertEqual(int(TEST_MASTER_PORT),
-                         self.xfr.xfrin_started_master_port)
+        # also try a different port in the actual command
+        zones = { 'zones': [
+                  { 'name': TEST_ZONE_NAME_STR,
+                    'master_addr': TEST_MASTER_IPV6_ADDRESS,
+                    'master_port': str(int(TEST_MASTER_PORT) + 1)
+                  }
+                ]}
+        self.xfr.config_handler(zones)
+        # the command should now fail
+        self.assertEqual(self.xfr.command_handler("notify",
+                                                  self.args)['result'][0], 1)
+
+
+    def test_command_handler_notify_known_zone(self):
+        # try it with a known zone
+        self.args['master'] = TEST_MASTER_IPV6_ADDRESS
+
+        # with a zone configuration that has a matching master address.
+        zones = { 'zones': [
+                  { 'name': TEST_ZONE_NAME_STR,
+                    'master_addr': TEST_MASTER_IPV6_ADDRESS,
+                    'master_port': TEST_MASTER_PORT
+                  }
+                ]}
+        self.xfr.config_handler(zones)
+        self.assertEqual(self.xfr.command_handler("notify",
+                                                  self.args)['result'][0], 0)
 
     def test_command_handler_unknown(self):
         self.assertEqual(self.xfr.command_handler("xxx", None)['result'][0], 1)
@@ -955,13 +2319,24 @@ class TestXfrin(unittest.TestCase):
                 self.assertEqual(zone_info.tsig_key.to_text(), TSIGKey(zone_config['tsig_key']).to_text())
             else:
                 self.assertIsNone(zone_info.tsig_key)
-
-    def test_command_handler_zones(self):
+            if 'use_ixfr' in zone_config and\
+               zone_config.get('use_ixfr'):
+                self.assertTrue(zone_info.use_ixfr)
+            else:
+                # if not set, should default to False
+                self.assertFalse(zone_info.use_ixfr)
+
+    def test_config_handler_zones(self):
+        # This test passes a number of good and bad configs, and checks whether
+        # the values are reflected in the structure that will dictate the
+        # actual behaviour. It also checks if bad values are correctly
+        # handled
         config1 = { 'transfers_in': 3,
                    'zones': [
                    { 'name': 'test.example.',
                     'master_addr': '192.0.2.1',
-                    'master_port': 53
+                    'master_port': 53,
+                    'use_ixfr': False
                    }
                  ]}
         self.assertEqual(self.xfr.config_handler(config1)['result'][0], 0)
@@ -972,7 +2347,8 @@ class TestXfrin(unittest.TestCase):
                    { 'name': 'test.example.',
                     'master_addr': '192.0.2.2',
                     'master_port': 53,
-                    'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g=="
+                    'tsig_key': "example.com:SFuWd/q99SzF8Yzd1QbB9g==",
+                    'use_ixfr': True
                    }
                  ]}
         self.assertEqual(self.xfr.config_handler(config2)['result'][0], 0)
@@ -1082,6 +2458,50 @@ class TestXfrin(unittest.TestCase):
         # since this has failed, we should still have the previous config
         self._check_zones_config(config2)
 
+    def test_config_handler_zones_default(self):
+        # Checking if some default config values apply.  Using a separate
+        # test case for a fresh xfr object.
+        config = { 'zones': [
+                   { 'name': 'test.example.',
+                    'master_addr': '192.0.2.1',
+                    'master_port': 53,
+                   }
+                 ]}
+        self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+        self._check_zones_config(config)
+
+    def common_ixfr_setup(self, xfr_mode, use_ixfr):
+        # This helper method explicitly sets up a zone configuration with
+        # use_ixfr, and invokes either retransfer or refresh.
+        # Shared by some of the following test cases.
+        config = {'zones': [
+                {'name': 'example.com.',
+                 'master_addr': '192.0.2.1',
+                 'use_ixfr': use_ixfr}]}
+        self.assertEqual(self.xfr.config_handler(config)['result'][0], 0)
+        self.assertEqual(self.xfr.command_handler(xfr_mode,
+                                                  self.args)['result'][0], 0)
+
+    def test_command_handler_retransfer_ixfr_enabled(self):
+        # If IXFR is explicitly enabled in config, IXFR will be used
+        self.common_ixfr_setup('retransfer', True)
+        self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+    def test_command_handler_refresh_ixfr_enabled(self):
+        # Same for refresh
+        self.common_ixfr_setup('refresh', True)
+        self.assertEqual(RRType.IXFR(), self.xfr.xfrin_started_request_type)
+
+    def test_command_handler_retransfer_ixfr_disabled(self):
+        # Similar to the previous case, but explicitly disabled.  AXFR should
+        # be used.
+        self.common_ixfr_setup('retransfer', False)
+        self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
+
+    def test_command_handler_refresh_ixfr_disabled(self):
+        # Same for refresh
+        self.common_ixfr_setup('refresh', False)
+        self.assertEqual(RRType.AXFR(), self.xfr.xfrin_started_request_type)
 
 def raise_interrupt():
     raise KeyboardInterrupt()
@@ -1114,6 +2534,184 @@ class TestMain(unittest.TestCase):
         MockXfrin.check_command_hook = raise_exception
         main(MockXfrin, False)
 
+class TestXfrinProcess(unittest.TestCase):
+    """
+    Some tests for the xfrin_process function. This replaces the
+    XfrinConnection class with itself, so we can emulate whatever behavior we
+    might want.
+
+    Currently only tests for retry if IXFR fails.
+    """
+    def setUp(self):
+        """
+        Backs up the original class implementation so it can be restored
+        and places our own version in place of the constructor.
+
+        Also sets up several internal variables to watch what happens.
+        """
+        # This will hold a "log" of what transfers were attempted.
+        self.__transfers = []
+        # This will "log" if failures or successes happened.
+        self.__published = []
+        # How many connections were created.
+        self.__created_connections = 0
+
+    def __get_connection(self, *args):
+        """
+        Provides a "connection". To mock the connection and see what it is
+        asked to do, we pretend to be the connection.
+        """
+        self.__created_connections += 1
+        return self
+
+    def connect_to_master(self):
+        """
+        Part of pretending to be the connection. It pretends it connected
+        correctly every time.
+        """
+        return True
+
+    def do_xfrin(self, check_soa, request_type):
+        """
+        Part of pretending to be the connection. It looks up what answer
+        should be given now and logs what request happened.
+        """
+        self.__transfers.append(request_type)
+        ret = self.__rets[0]
+        self.__rets = self.__rets[1:]
+        return ret
+
+    def zone_str(self):
+        """
+        Part of pretending to be the connection. It provides the logging name
+        of zone.
+        """
+        return "example.org/IN"
+
+    def publish_xfrin_news(self, zone_name, rrclass, ret):
+        """
+        Part of pretending to be the server as well. This just logs the
+        success/failure of the previous operation.
+        """
+        self.__published.append(ret)
+
+    def close(self):
+        """
+        Part of pretending to be the connection.
+        """
+        pass
+
+    def init_socket(self):
+        """
+        Part of pretending to be the connection.
+        """
+        pass
+
+    def __do_test(self, rets, transfers, request_type):
+        """
+        Do the actual test. The request type, prepared successes/failures
+        and expected sequence of transfers are passed to specify what test
+        should happen.
+        """
+        self.__rets = rets
+        published = rets[-1]
+        xfrin.process_xfrin(self, XfrinRecorder(), Name("example.org."),
+                            RRClass.IN(), None, None, None, True, None,
+                            request_type, self.__get_connection)
+        self.assertEqual([], self.__rets)
+        self.assertEqual(transfers, self.__transfers)
+        # Create a connection for each attempt
+        self.assertEqual(len(transfers), self.__created_connections)
+        self.assertEqual([published], self.__published)
+
+    def test_ixfr_ok(self):
+        """
+        Everything OK the first time, over IXFR.
+        """
+        self.__do_test([XFRIN_OK], [RRType.IXFR()], RRType.IXFR())
+
+    def test_axfr_ok(self):
+        """
+        Everything OK the first time, over AXFR.
+        """
+        self.__do_test([XFRIN_OK], [RRType.AXFR()], RRType.AXFR())
+
+    def test_axfr_fail(self):
+        """
+        The transfer failed over AXFR. Should not be retried (we don't expect
+        to fail on AXFR but succeed on IXFR, and we didn't use IXFR in the
+        first place for some reason).
+        """
+        self.__do_test([XFRIN_FAIL], [RRType.AXFR()], RRType.AXFR())
+
+    def test_ixfr_fallback(self):
+        """
+        The transfer fails over IXFR, but succeeds over AXFR. It should fall back
+        to it and say everything is OK.
+        """
+        self.__do_test([XFRIN_FAIL, XFRIN_OK], [RRType.IXFR(), RRType.AXFR()],
+                       RRType.IXFR())
+
+    def test_ixfr_fail(self):
+        """
+        The transfer fails both over IXFR and AXFR. It should report failure
+        (only once) and should try both before giving up.
+        """
+        self.__do_test([XFRIN_FAIL, XFRIN_FAIL],
+                       [RRType.IXFR(), RRType.AXFR()], RRType.IXFR())
+class TestFormatting(unittest.TestCase):
+    # If the formatting functions are moved to a more general library
+    # (ticket #1379), these tests should be moved with them.
+    def test_format_zone_str(self):
+        self.assertEqual("example.com/IN",
+                         format_zone_str(isc.dns.Name("example.com"),
+                         isc.dns.RRClass("IN")))
+        self.assertEqual("example.com/CH",
+                         format_zone_str(isc.dns.Name("example.com"),
+                         isc.dns.RRClass("CH")))
+        self.assertEqual("example.org/IN",
+                         format_zone_str(isc.dns.Name("example.org"),
+                         isc.dns.RRClass("IN")))
+    
+    def test_format_addrinfo(self):
+        # This test may need to be updated if the input type is changed,
+        # right now it is a nested tuple:
+        # (family, sockettype, (address, port))
+        # of which sockettype is ignored
+        self.assertEqual("192.0.2.1:53",
+                         format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+                                          ("192.0.2.1", 53))))
+        self.assertEqual("192.0.2.2:53",
+                         format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+                                          ("192.0.2.2", 53))))
+        self.assertEqual("192.0.2.1:54",
+                         format_addrinfo((socket.AF_INET, socket.SOCK_STREAM,
+                                          ("192.0.2.1", 54))))
+        self.assertEqual("[2001:db8::1]:53",
+                         format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+                                          ("2001:db8::1", 53))))
+        self.assertEqual("[2001:db8::2]:53",
+                         format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+                                          ("2001:db8::2", 53))))
+        self.assertEqual("[2001:db8::1]:54",
+                         format_addrinfo((socket.AF_INET6, socket.SOCK_STREAM,
+                                          ("2001:db8::1", 54))))
+        self.assertEqual("/some/file",
+                         format_addrinfo((socket.AF_UNIX, socket.SOCK_STREAM,
+                                          "/some/file")))
+        # second element of passed tuple should be ignored
+        self.assertEqual("192.0.2.1:53",
+                         format_addrinfo((socket.AF_INET, None,
+                                          ("192.0.2.1", 53))))
+        self.assertEqual("192.0.2.1:53",
+                         format_addrinfo((socket.AF_INET, "Just some string",
+                                          ("192.0.2.1", 53))))
+        self.assertRaises(TypeError, format_addrinfo, 1)
+        self.assertRaises(TypeError, format_addrinfo,
+                                     (socket.AF_INET, "asdf"))
+        self.assertRaises(TypeError, format_addrinfo,
+                                     (socket.AF_INET, "asdf", ()))
+
 if __name__== "__main__":
     try:
         isc.log.resetUnitTestRootLogger()
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 07de8f0..1167bef 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -24,12 +24,15 @@ import struct
 import threading
 import socket
 import random
+from functools import reduce
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 from isc.notify import notify_out
 import isc.util.process
+from isc.datasrc import DataSourceClient, ZoneFinder
 import isc.net.parse
-from xfrin_messages import *
+from isc.xfrin.diff import Diff
+from isc.log_messages.xfrin_messages import *
 
 isc.log.init("b10-xfrin")
 logger = isc.log.Logger("xfrin")
@@ -62,6 +65,9 @@ ZONE_MANAGER_MODULE_NAME = 'Zonemgr'
 REFRESH_FROM_ZONEMGR = 'refresh_from_zonemgr'
 ZONE_XFRIN_FAILED = 'zone_xfrin_failed'
 
+# Constants for debug levels.
+DBG_XFRIN_TRACE = logger.DBGLVL_TRACE_BASIC
+
 # These two default are currently hard-coded. For config this isn't
 # necessary, but we need these defaults for optional command arguments
 # (TODO: have similar support to get default values for command
@@ -70,13 +76,24 @@ DEFAULT_MASTER_PORT = 53
 DEFAULT_ZONE_CLASS = RRClass.IN()
 
 __version__ = 'BIND10'
-# define xfrin rcode
-XFRIN_OK = 0
-XFRIN_FAIL = 1
+
+# Internal result codes of an xfr session
+XFRIN_OK = 0                    # normal success
+XFRIN_FAIL = 1                  # general failure (internal/external)
 
 class XfrinException(Exception):
     pass
 
+class XfrinProtocolError(Exception):
+    '''An exception raised for errors encountered in xfrin protocol handling.
+    '''
+    pass
+
+class XfrinZoneUptodate(Exception):
+    '''Raised when an IXFR response indicates the zone is already up to date.
+    '''
+    pass
+
 class XfrinZoneInfoException(Exception):
     """This exception is raised if there is an error in the given
        configuration (part), or when a command does not have a required
@@ -112,60 +129,524 @@ def _check_zone_class(zone_class_str):
     except InvalidRRClass as irce:
         raise XfrinZoneInfoException("bad zone class: " + zone_class_str + " (" + str(irce) + ")")
 
+def format_zone_str(zone_name, zone_class):
+    """Helper function to format a zone name and class as a string of
+       the form '<name>/<class>'.
+       Parameters:
+       zone_name (isc.dns.Name) name to format
+       zone_class (isc.dns.RRClass) class to format
+    """
+    return zone_name.to_text(True) + '/' + str(zone_class)
+
+def format_addrinfo(addrinfo):
+    """Helper function to format the addrinfo as a string of the form
+       <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+       sockets, and unknown address families, it returns a basic string
+       conversion of the third element of the passed tuple.
+       Parameters:
+       addrinfo: a 3-tuple consisting of address family, socket type, and,
+                 depending on the family, either a 2-tuple with the address
+                 and port, or a filename
+    """
+    try:
+        if addrinfo[0] == socket.AF_INET:
+            return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+        elif addrinfo[0] == socket.AF_INET6:
+            return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+        else:
+            return str(addrinfo[2])
+    except IndexError:
+        raise TypeError("addrinfo argument to format_addrinfo() does not "
+                        "appear to be consisting of (family, socktype, (addr, port))")
+
+def get_soa_serial(soa_rdata):
+    '''Extract the serial field of SOA RDATA and return it as a Serial object.
+
+    We don't have to be very efficient here, so we first dump the entire RDATA
+    as a string and convert the first corresponding field.  This should be
+    sufficient in practice, but may not always work when the MNAME or RNAME
+    contains an (escaped) space character in their labels.  Ideally there
+    should be a more direct and convenient way to get access to the SOA
+    fields.
+    '''
+    return Serial(int(soa_rdata.to_text().split()[2]))
+
+class XfrinState:
+    '''
+    The states of the incoming *XFR state machine.
+
+    We (will) handle both IXFR and AXFR with a single integrated state
+    machine because they cannot be distinguished immediately - an AXFR
+    response to an IXFR request can only be detected when the first two (2)
+    response RRs have already been received.
+
+    The following diagram summarizes the state transition.  After sending
+    the query, xfrin starts the process with the InitialSOA state (all
+    IXFR/AXFR response begins with an SOA).  When it reaches IXFREnd
+    or AXFREnd, the process successfully completes.
+
+                             (AXFR or
+            (recv SOA)        AXFR-style IXFR)  (SOA, add)
+    InitialSOA------->FirstData------------->AXFR--------->AXFREnd
+         |                |                  |  ^         (post xfr
+         |(IXFR &&        |                  |  |        checks, then
+         | recv SOA       |                  +--+        commit)
+         | not new)       |            (non SOA, add)
+         V                |
+    IXFRUptodate          |                     (non SOA, delete)
+               (pure IXFR,|                           +-------+
+            keep handling)|             (Delete SOA)  V       |
+                          + ->IXFRDeleteSOA------>IXFRDelete--+
+                                   ^                   |
+                (see SOA, not end, |          (see SOA)|
+            commit, keep handling) |                   |
+                                   |                   V
+                      +---------IXFRAdd<----------+IXFRAddSOA
+        (non SOA, add)|         ^  |    (Add SOA)
+                      ----------+  |
+                                   |(see SOA w/ end serial, commit changes)
+                                   V
+                                IXFREnd
+
+    Note that changes are committed for every "difference sequence"
+    (i.e. changes for one SOA update).  This means when an IXFR response
+    contains multiple difference sequences and something goes wrong
+    after several commits, these changes have been published and visible
+    to clients even if the IXFR session is subsequently aborted.
+    It is not clear if this is valid in terms of the protocol specification.
+    Section 4 of RFC 1995 states:
+
+       An IXFR client, should only replace an older version with a newer
+       version after all the differences have been successfully processed.
+
+    If this "replacement" is for the changes of one difference sequence
+    and "all the differences" mean the changes for that sequence, this
+    implementation strictly follows what RFC states.  If this is for
+    the entire IXFR response (that may contain multiple sequences),
+    we should implement it with one big transaction and one final commit
+    at the very end.
+
+    For now, we implement it with multiple smaller commits for two
+    reasons.  First, this is what BIND 9 does, and we generally port
+    the implementation logic here.  BIND 9 has been supporting IXFR
+    for many years, so the fact that it still behaves this way
+    probably means it at least doesn't cause a severe operational
+    problem in practice.  Second, especially because BIND 10 would
+    often uses a database backend, a larger transaction could cause an
+    undesirable effects, e.g. suspending normal lookups for a longer
+    period depending on the characteristics of the database.  Even if
+    we find something wrong in a later sequeunce and abort the
+    session, we can start another incremental update from what has
+    been validated, or we can switch to AXFR to replace the zone
+    completely.
+
+    This implementation uses the state design pattern, where each state
+    is represented as a subclass of the base XfrinState class.  Each concrete
+    subclass of XfrinState is assumed to define two methods: handle_rr() and
+    finish_message().  These methods handle specific part of XFR protocols
+    and (if necessary) perform the state transition.
+
+    Conceptually, XfrinState and its subclasses are a "friend" of
+    XfrinConnection and are assumed to be allowed to access its internal
+    information (even though Python does not have a strict access control
+    between different classes).
+
+    The XfrinState and its subclasses are designed to be stateless, and
+    can be used as singleton objects.  For now, however, we always instantiate
+    a new object for every state transition, partly because the introduction
+    of singleton will make a code bit complicated, and partly because
+    the overhead of object instantiation wouldn't be significant for xfrin.
+
+    '''
+    def set_xfrstate(self, conn, new_state):
+        '''Set the XfrConnection to a given new state.
+
+        As a "friend" class, this method intentionally gets access to the
+        connection's "private" method.
+
+        '''
+        conn._XfrinConnection__set_xfrstate(new_state)
+
+    def handle_rr(self, conn):
+        '''Handle one RR of an XFR response message.
+
+        Depending on the state, the RR is generally added or deleted in the
+        corresponding data source, or in some special cases indicates
+        a specifi transition, such as starting a new IXFR difference
+        sequence or completing the session.
+
+        All subclasses have their specific behaviors for this method, so
+        there is no default definition.  If the base class version
+        is called, it's a bug of the caller, and it's notified via
+        an XfrinException exception.
+
+        This method returns a boolean value: True if the given RR was
+        fully handled and the caller should go to the next RR; False
+        if the caller needs to call this method with the (possibly) new
+        state for the same RR again.
+
+        '''
+        raise XfrinException("Internal bug: " +
+                             "XfrinState.handle_rr() called directly")
+
+    def finish_message(self, conn):
+        '''Perform any final processing after handling all RRs of a response.
+
+        This method then returns a boolean indicating whether to continue
+        receiving the message.  Unless it's in the end of the entire XFR
+        session, we should continue, so this default method simply returns
+        True.
+
+        '''
+        return True
+
+class XfrinInitialSOA(XfrinState):
+    def handle_rr(self, conn, rr):
+        if rr.get_type() != RRType.SOA():
+            raise XfrinProtocolError('First RR in zone transfer must be SOA ('
+                                     + rr.get_type().to_text() + ' received)')
+        conn._end_serial = get_soa_serial(rr.get_rdata()[0])
+
+        if conn._request_type == RRType.IXFR() and \
+                conn._end_serial <= conn._request_serial:
+            logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
+                        conn._request_serial, conn._end_serial)
+            self.set_xfrstate(conn, XfrinIXFRUptodate())
+        else:
+            self.set_xfrstate(conn, XfrinFirstData())
+
+        return True
+
+class XfrinFirstData(XfrinState):
+    def handle_rr(self, conn, rr):
+        '''Handle the first RR after initial SOA in an XFR session.
+
+        This state happens exactly once in an XFR session, where
+        we decide whether it's incremental update ("real" IXFR) or
+        non incremental update (AXFR or AXFR-style IXFR).
+        If we initiated IXFR and the transfer begins with two SOAs
+        (the serial of the second one being equal to our serial),
+        it's incremental; otherwise it's non incremental.
+
+        This method always returns False (unlike many other handle_rr()
+        methods) because this first RR must be examined again in the
+        determined update context.
+
+        Note that in the non incremental case the RR should normally be
+        something other than SOA, but it's still possible it's an SOA with a
+        different serial than ours.  The only possible interpretation at
+        this point is that it's non incremental update that only consists
+        of the SOA RR.  It will result in broken zone (for example, it
+        wouldn't even contain an apex NS) and should be rejected at post
+        XFR processing, but in terms of the XFR session processing we
+        accept it and move forward.
+
+        Note further that, in the half-broken SOA-only transfer case,
+        these two SOAs are supposed to be the same as stated in Section 2.2
+        of RFC 5936.  We don't check that condition here, either; we'll
+        leave whether and how to deal with that situation to the end of
+        the processing of non incremental update.  See also a related
+        discussion at the IETF dnsext wg:
+        http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+
+        '''
+        if conn._request_type == RRType.IXFR() and \
+                rr.get_type() == RRType.SOA() and \
+                conn._request_serial == get_soa_serial(rr.get_rdata()[0]):
+            logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_INCREMENTAL_RESP,
+                         conn.zone_str())
+            self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+        else:
+            logger.debug(DBG_XFRIN_TRACE, XFRIN_GOT_NONINCREMENTAL_RESP,
+                 conn.zone_str())
+            # We are now going to add RRs to the new zone.  We need to create
+            # a Diff object.  It will be used throughout the XFR session.
+            # DISABLE FOR DEBUG
+            conn._diff = Diff(conn._datasrc_client, conn._zone_name, True)
+            self.set_xfrstate(conn, XfrinAXFR())
+        return False
+
+class XfrinIXFRDeleteSOA(XfrinState):
+    def handle_rr(self, conn, rr):
+        if rr.get_type() != RRType.SOA():
+            # this shouldn't happen; should this occur it means an internal
+            # bug.
+            raise XfrinException(rr.get_type().to_text() +
+                                 ' RR is given in IXFRDeleteSOA state')
+        # This is the beginning state of one difference sequence (changes
+        # for one SOA update).  We need to create a new Diff object now.
+        # Note also that we (unconditionally) enable journaling here.  The
+        # Diff constructor may internally disable it, however, if the
+        # underlying data source doesn't support journaling.
+        conn._diff = Diff(conn._datasrc_client, conn._zone_name, False, True)
+        conn._diff.delete_data(rr)
+        self.set_xfrstate(conn, XfrinIXFRDelete())
+        return True
+
+class XfrinIXFRDelete(XfrinState):
+    def handle_rr(self, conn, rr):
+        if rr.get_type() == RRType.SOA():
+            # This is the only place where current_serial is set
+            conn._current_serial = get_soa_serial(rr.get_rdata()[0])
+            self.set_xfrstate(conn, XfrinIXFRAddSOA())
+            return False
+        conn._diff.delete_data(rr)
+        return True
+
+class XfrinIXFRAddSOA(XfrinState):
+    def handle_rr(self, conn, rr):
+        if rr.get_type() != RRType.SOA():
+            # this shouldn't happen; should this occur it means an internal
+            # bug.
+            raise XfrinException(rr.get_type().to_text() +
+                                 ' RR is given in IXFRAddSOA state')
+        conn._diff.add_data(rr)
+        self.set_xfrstate(conn, XfrinIXFRAdd())
+        return True
+
+class XfrinIXFRAdd(XfrinState):
+    def handle_rr(self, conn, rr):
+        if rr.get_type() == RRType.SOA():
+            soa_serial = get_soa_serial(rr.get_rdata()[0])
+            if soa_serial == conn._end_serial:
+                conn._diff.commit()
+                self.set_xfrstate(conn, XfrinIXFREnd())
+                return True
+            elif soa_serial != conn._current_serial:
+                raise XfrinProtocolError('IXFR out of sync: expected ' +
+                                         'serial ' +
+                                         str(conn._current_serial) +
+                                         ', got ' + str(soa_serial))
+            else:
+                conn._diff.commit()
+                self.set_xfrstate(conn, XfrinIXFRDeleteSOA())
+                return False
+        conn._diff.add_data(rr)
+        return True
+
+class XfrinIXFREnd(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after the end of IXFR diffs: ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        '''Final processing after processing an entire IXFR session.
+
+        There will be more actions here, but for now we simply return False,
+        indicating there will be no more message to receive.
+
+        '''
+        return False
+
+class XfrinIXFRUptodate(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after single IXFR response ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        raise XfrinZoneUptodate
+
+class XfrinAXFR(XfrinState):
+    def handle_rr(self, conn, rr):
+        """
+        Handle the RR by putting it into the zone.
+        """
+        conn._diff.add_data(rr)
+        if rr.get_type() == RRType.SOA():
+            # SOA means end.  Don't commit it yet - we need to perform
+            # post-transfer checks
+
+            soa_serial = get_soa_serial(rr.get_rdata()[0])
+            if conn._end_serial != soa_serial:
+                logger.warn(XFRIN_AXFR_INCONSISTENT_SOA, conn.zone_str(),
+                            conn._end_serial, soa_serial)
+
+            self.set_xfrstate(conn, XfrinAXFREnd())
+        # Yes, we've eaten this RR.
+        return True
+
+class XfrinAXFREnd(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after the end of AXFR: ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        """
+        Final processing after processing an entire AXFR session.
+
+        In this process all the AXFR changes are committed to the
+        data source.
+
+        There might be more actions here, but for now we simply return False,
+        indicating there will be no more message to receive.
+
+        """
+        conn._diff.commit()
+        return False
+
 class XfrinConnection(asyncore.dispatcher):
     '''Do xfrin in this class. '''
 
     def __init__(self,
-                 sock_map, zone_name, rrclass, db_file, shutdown_event,
-                 master_addrinfo, tsig_key = None, verbose = False,
-                 idle_timeout = 60):
-        ''' idle_timeout: max idle time for read data from socket.
-            db_file: specify the data source file.
-            check_soa: when it's true, check soa first before sending xfr query
+                 sock_map, zone_name, rrclass, datasrc_client,
+                 shutdown_event, master_addrinfo, db_file, tsig_key=None,
+                 idle_timeout=60):
+        '''Constructor of the XfrinConnection class.
+
+        db_file: SQLite3 DB file.  Unfortunately we still need this for
+                 temporary workaround in _get_zone_soa().  This should be
+                 removed when we eliminate the need for the workaround.
+        idle_timeout: max idle time for read data from socket.
+        datasrc_client: the data source client object used for the XFR session.
+                        This will eventually replace db_file completely.
+
         '''
 
         asyncore.dispatcher.__init__(self, map=sock_map)
-        self.create_socket(master_addrinfo[0], master_addrinfo[1])
+
+        # The XFR state.  Conceptually this is purely private, so we emphasize
+        # the fact by the double underscore.  Other classes are assumed to
+        # get access to this via get_xfrstate(), and only XfrinState classes
+        # are assumed to be allowed to modify it via __set_xfrstate().
+        self.__state = None
+
+        # Requested transfer type (RRType.AXFR or RRType.IXFR).  The actual
+        # transfer type may differ due to IXFR->AXFR fallback:
+        self._request_type = None
+
+        # Zone parameters
         self._zone_name = zone_name
-        self._sock_map = sock_map
         self._rrclass = rrclass
+
+        # Data source handler
         self._db_file = db_file
+        self._datasrc_client = datasrc_client
+        self._zone_soa = self._get_zone_soa()
+
+        self._sock_map = sock_map
         self._soa_rr_count = 0
         self._idle_timeout = idle_timeout
-        self.setblocking(1)
         self._shutdown_event = shutdown_event
-        self._verbose = verbose
-        self._master_address = master_addrinfo[2]
+        self._master_addrinfo = master_addrinfo
         self._tsig_key = tsig_key
         self._tsig_ctx = None
         # tsig_ctx_creator is introduced to allow tests to use a mock class for
         # easier tests (in normal case we always use the default)
-        self._tsig_ctx_creator = self.__create_tsig_ctx
+        self._tsig_ctx_creator = lambda key : TSIGContext(key)
 
-    def __create_tsig_ctx(self, key):
-        return TSIGContext(key)
+    def init_socket(self):
+        '''Initialize the underlying socket.
+
+        This is essentially a part of __init__() and is expected to be
+        called immediately after the constructor.  It's separated from
+        the constructor because otherwise we might not be able to close
+        it if the constructor raises an exception after opening the socket.
+        '''
+        self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
+        self.setblocking(1)
+
+    def _get_zone_soa(self):
+        '''Retrieve the current SOA RR of the zone to be transferred.
+
+        It will be used for various purposes in subsequent xfr protocol
+        processing.  It is entirely possible that the zone is currently
+        empty and therefore doesn't have an SOA, so this method doesn't
+        consider it an error and returns None in such a case.  It may or
+        may not result in failure in the actual processing depending on
+        how the SOA is used.
+
+        When the zone has an SOA RR, this method makes sure that it's
+        valid, i.e., it has exactly one RDATA; if it is not the case
+        this method returns None.
+
+        If the underlying data source doesn't even know the zone, this method
+        tries to provide backward compatible behavior where xfrin is
+        responsible for creating zone in the corresponding DB table.
+        For a longer term we should deprecate this behavior by introducing
+        more generic zone management framework, but at the moment we try
+        to not surprise existing users.  (Note also that the part of
+        providing the compatible behavior uses the old data source API.
+        We'll deprecate this API in the near future, too).
+
+        '''
+        # get the zone finder.  this must be SUCCESS (not even
+        # PARTIALMATCH) because we are specifying the zone origin name.
+        result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            # The data source doesn't know the zone.  For now, we provide
+            # backward compatibility and create a new one ourselves.
+            isc.datasrc.sqlite3_ds.load(self._db_file,
+                                        self._zone_name.to_text(),
+                                        lambda : [])
+            logger.warn(XFRIN_ZONE_CREATED, self.zone_str())
+            # try again
+            result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return None
+        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+                                        None, ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
+            return None
+        if soa_rrset.get_rdata_count() != 1:
+            logger.warn(XFRIN_ZONE_MULTIPLE_SOA, self.zone_str(),
+                        soa_rrset.get_rdata_count())
+            return None
+        return soa_rrset
+
+    def __set_xfrstate(self, new_state):
+        self.__state = new_state
+
+    def get_xfrstate(self):
+        return self.__state
+
+    def zone_str(self):
+        '''A convenience function for logging to include zone name and class'''
+        return format_zone_str(self._zone_name, self._rrclass)
 
     def connect_to_master(self):
         '''Connect to master in TCP.'''
 
         try:
-            self.connect(self._master_address)
+            self.connect(self._master_addrinfo[2])
             return True
         except socket.error as e:
-            logger.error(XFRIN_CONNECT_MASTER, self._master_address, str(e))
+            logger.error(XFRIN_CONNECT_MASTER, self._master_addrinfo[2],
+                         str(e))
             return False
 
     def _create_query(self, query_type):
-        '''Create dns query message. '''
+        '''Create an XFR-related query message.
+
+        query_type is either SOA, AXFR or IXFR.  An IXFR query needs the
+        zone's current SOA record.  If it's not known, it raises an
+        XfrinException exception.  Note that this may not necessarily be a
+        broken configuration; for the first attempt of transfer the secondary
+        may not have any boot-strap zone information, in which case IXFR
+        simply won't work.  The xfrin should then fall back to AXFR.
+        _request_serial is recorded for later use.
 
+        '''
         msg = Message(Message.RENDER)
         query_id = random.randint(0, 0xFFFF)
         self._query_id = query_id
         msg.set_qid(query_id)
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
-        query_question = Question(Name(self._zone_name), self._rrclass, query_type)
-        msg.add_question(query_question)
+        msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+
+        # Remember our serial, if known
+        self._request_serial = get_soa_serial(self._zone_soa.get_rdata()[0]) \
+            if self._zone_soa is not None else None
+
+        # Set the authority section with our SOA for IXFR
+        if query_type == RRType.IXFR():
+            if self._zone_soa is None:
+                # (incremental) IXFR doesn't work without known SOA
+                raise XfrinException('Failed to create IXFR query due to no ' +
+                                     'SOA for ' + self.zone_str())
+            msg.add_rrset(Message.SECTION_AUTHORITY, self._zone_soa)
+
         return msg
 
     def _send_data(self, data):
@@ -219,7 +700,8 @@ class XfrinConnection(asyncore.dispatcher):
         if self._tsig_ctx is not None:
             tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
             if tsig_error != TSIGError.NOERROR:
-                raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+                raise XfrinProtocolError('TSIG verify fail: %s' %
+                                         str(tsig_error))
         elif tsig_record is not None:
             # If the response includes a TSIG while we didn't sign the query,
             # we treat it as an error.  RFC doesn't say anything about this
@@ -228,13 +710,78 @@ class XfrinConnection(asyncore.dispatcher):
             # implementation would return such a response, and since this is
             # part of security mechanism, it's probably better to be more
             # strict.
-            raise XfrinException('Unexpected TSIG in response')
+            raise XfrinProtocolError('Unexpected TSIG in response')
+
+    def __parse_soa_response(self, msg, response_data):
+        '''Parse a response to SOA query and extract the SOA from answer.
+
+        This is a subroutine of _check_soa_serial().  This method also
+        validates message, and rejects bogus responses with XfrinProtocolError.
+
+        If everything is okay, it returns the SOA RR from the answer section
+        of the response.
+
+        '''
+        # Check TSIG integrity and validate the header.  Unlike AXFR/IXFR,
+        # we should be more strict for SOA queries and check the AA flag, too.
+        self._check_response_tsig(msg, response_data)
+        self._check_response_header(msg)
+        if not msg.get_header_flag(Message.HEADERFLAG_AA):
+            raise XfrinProtocolError('non-authoritative answer to SOA query')
+
+        # Validate the question section
+        n_question = msg.get_rr_count(Message.SECTION_QUESTION)
+        if n_question != 1:
+            raise XfrinProtocolError('Invalid response to SOA query: ' +
+                                     '(' + str(n_question) + ' questions, 1 ' +
+                                     'expected)')
+        resp_question = msg.get_question()[0]
+        if resp_question.get_name() != self._zone_name or \
+                resp_question.get_class() != self._rrclass or \
+                resp_question.get_type() != RRType.SOA():
+            raise XfrinProtocolError('Invalid response to SOA query: '
+                                     'question mismatch: ' +
+                                     str(resp_question))
+
+        # Look into the answer section for SOA
+        soa = None
+        for rr in msg.get_section(Message.SECTION_ANSWER):
+            if rr.get_type() == RRType.SOA():
+                if soa is not None:
+                    raise XfrinProtocolError('SOA response had multiple SOAs')
+                soa = rr
+            # There should not be a CNAME record at top of zone.
+            if rr.get_type() == RRType.CNAME():
+                raise XfrinProtocolError('SOA query resulted in CNAME')
+
+        # If SOA is not found, try to figure out the reason then report it.
+        if soa is None:
+            # See if we have any SOA records in the authority section.
+            for rr in msg.get_section(Message.SECTION_AUTHORITY):
+                if rr.get_type() == RRType.NS():
+                    raise XfrinProtocolError('SOA query resulted in referral')
+                if rr.get_type() == RRType.SOA():
+                    raise XfrinProtocolError('SOA query resulted in NODATA')
+            raise XfrinProtocolError('No SOA record found in response to ' +
+                                     'SOA query')
+
+        # Check if the SOA is really what we asked for
+        if soa.get_name() != self._zone_name or \
+                soa.get_class() != self._rrclass:
+            raise XfrinProtocolError("SOA response doesn't match query: " +
+                                     str(soa))
+
+        # All okay, return it
+        return soa
+
 
     def _check_soa_serial(self):
-        ''' Compare the soa serial, if soa serial in master is less than
-        the soa serial in local, Finish xfrin.
-        False: soa serial in master is less or equal to the local one.
-        True: soa serial in master is bigger
+        '''Send SOA query and compare the local and remote serials.
+
+        If we know our local serial and the remote serial isn't newer
+        than ours, we abort the session with XfrinZoneUptodate.
+        On success it returns XFRIN_OK for testing.  The caller won't use it.
+
         '''
 
         self._send_query(RRType.SOA())
@@ -242,53 +789,75 @@ class XfrinConnection(asyncore.dispatcher):
         msg_len = socket.htons(struct.unpack('H', data_len)[0])
         soa_response = self._get_request_response(msg_len)
         msg = Message(Message.PARSE)
-        msg.from_wire(soa_response)
+        msg.from_wire(soa_response, Message.PRESERVE_ORDER)
+
+        # Validate/parse the rest of the response, and extract the SOA
+        # from the answer section
+        soa = self.__parse_soa_response(msg, soa_response)
+
+        # Compare the two serials.  If ours is 'new', abort with ZoneUptodate.
+        primary_serial = get_soa_serial(soa.get_rdata()[0])
+        if self._request_serial is not None and \
+                self._request_serial >= primary_serial:
+            if self._request_serial != primary_serial:
+                logger.info(XFRIN_ZONE_SERIAL_AHEAD, primary_serial,
+                            self.zone_str(),
+                            format_addrinfo(self._master_addrinfo),
+                            self._request_serial)
+            raise XfrinZoneUptodate
 
-        # TSIG related checks, including an unexpected signed response
-        self._check_response_tsig(msg, soa_response)
-
-        # perform some minimal level validation.  It's an open issue how
-        # strict we should be (see the comment in _check_response_header())
-        self._check_response_header(msg)
-
-        # TODO, need select soa record from data source then compare the two
-        # serial, current just return OK, since this function hasn't been used
-        # now.
         return XFRIN_OK
 
-    def do_xfrin(self, check_soa, ixfr_first = False):
-        '''Do xfr by sending xfr request and parsing response. '''
+    def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
+        '''Do an xfr session by sending xfr request and parsing responses.'''
 
         try:
             ret = XFRIN_OK
+            self._request_type = request_type
+            # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
+            # to hardcode here.
+            req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
             if check_soa:
-                logstr = 'SOA check for \'%s\' ' % self._zone_name
-                ret =  self._check_soa_serial()
-
-            if ret == XFRIN_OK:
-                logger.info(XFRIN_AXFR_TRANSFER_STARTED, self._zone_name)
-                self._send_query(RRType.AXFR())
-                isc.datasrc.sqlite3_ds.load(self._db_file, self._zone_name,
-                                            self._handle_xfrin_response)
-
-                logger.info(XFRIN_AXFR_TRANSFER_SUCCESS, self._zone_name)
-
-        except XfrinException as e:
-            logger.error(XFRIN_AXFR_TRANSFER_FAILURE, self._zone_name, str(e))
+                self._check_soa_serial()
+
+            logger.info(XFRIN_XFR_TRANSFER_STARTED, req_str, self.zone_str())
+            self._send_query(self._request_type)
+            self.__state = XfrinInitialSOA()
+            self._handle_xfrin_responses()
+            logger.info(XFRIN_XFR_TRANSFER_SUCCESS, req_str, self.zone_str())
+
+        except XfrinZoneUptodate:
+            # Eventually we'll probably have to treat this case as a trigger
+            # of trying another primary server, etc, but for now we treat it
+            # as "success".
+            pass
+        except XfrinProtocolError as e:
+            logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+                        self.zone_str(),
+                        format_addrinfo(self._master_addrinfo), str(e))
             ret = XFRIN_FAIL
-            #TODO, recover data source.
-        except isc.datasrc.sqlite3_ds.Sqlite3DSError as e:
-            logger.error(XFRIN_AXFR_DATABASE_FAILURE, self._zone_name, str(e))
+        except XfrinException as e:
+            logger.error(XFRIN_XFR_TRANSFER_FAILURE, req_str,
+                         self.zone_str(),
+                         format_addrinfo(self._master_addrinfo), str(e))
             ret = XFRIN_FAIL
-        except UserWarning as e:
-            # XXX: this is an exception from our C++ library via the
-            # Boost.Python binding.  It would be better to have more more
-            # specific exceptions, but at this moment this is the finest
-            # granularity.
-            logger.error(XFRIN_AXFR_INTERNAL_FAILURE, self._zone_name, str(e))
+        except Exception as e:
+            # Catching all possible exceptions like this is generally not a
+            # good practice, but handling an xfr session could result in
+            # so many types of exceptions, including ones from the DNS library
+            # or from the data source library.  Eventually we'd introduce a
+            # hierarchy for exception classes from a base "ISC exception" and
+            # catch it here, but until then we need broadest coverage so that
+            # we won't miss anything.
+
+            logger.error(XFRIN_XFR_OTHER_FAILURE, req_str,
+                         self.zone_str(), str(e))
             ret = XFRIN_FAIL
         finally:
-           self.close()
+            # Make sure any remaining transaction in the diff is closed
+            # (if not yet - possible in case of xfr-level exception) as soon
+            # as possible
+            self._diff = None
 
         return ret
 
@@ -305,60 +874,31 @@ class XfrinConnection(asyncore.dispatcher):
 
         msg_rcode = msg.get_rcode()
         if msg_rcode != Rcode.NOERROR():
-            raise XfrinException('error response: %s' % msg_rcode.to_text())
+            raise XfrinProtocolError('error response: %s' %
+                                     msg_rcode.to_text())
 
         if not msg.get_header_flag(Message.HEADERFLAG_QR):
-            raise XfrinException('response is not a response')
+            raise XfrinProtocolError('response is not a response')
 
         if msg.get_qid() != self._query_id:
-            raise XfrinException('bad query id')
+            raise XfrinProtocolError('bad query id')
 
     def _check_response_status(self, msg):
         '''Check validation of xfr response. '''
 
         self._check_response_header(msg)
 
-        if msg.get_rr_count(Message.SECTION_ANSWER) == 0:
-            raise XfrinException('answer section is empty')
-
         if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
-            raise XfrinException('query section count greater than 1')
-
-    def _handle_answer_section(self, answer_section):
-        '''Return a generator for the reponse in one tcp package to a zone transfer.'''
-
-        for rrset in answer_section:
-            rrset_name = rrset.get_name().to_text()
-            rrset_ttl = int(rrset.get_ttl().to_text())
-            rrset_class = rrset.get_class().to_text()
-            rrset_type = rrset.get_type().to_text()
-
-            for rdata in rrset.get_rdata():
-                # Count the soa record count
-                if rrset.get_type() == RRType.SOA():
-                    self._soa_rr_count += 1
-
-                    # XXX: the current DNS message parser can't preserve the
-                    # RR order or separete the beginning and ending SOA RRs.
-                    # As a short term workaround, we simply ignore the second
-                    # SOA, and ignore the erroneous case where the transfer
-                    # session doesn't end with an SOA.
-                    if (self._soa_rr_count == 2):
-                        # Avoid inserting soa record twice
-                        break
-
-                rdata_text = rdata.to_text()
-                yield (rrset_name, rrset_ttl, rrset_class, rrset_type,
-                       rdata_text)
-
-    def _handle_xfrin_response(self):
-        '''Return a generator for the response to a zone transfer. '''
-        while True:
+            raise XfrinProtocolError('query section count greater than 1')
+
+    def _handle_xfrin_responses(self):
+        read_next_msg = True
+        while read_next_msg:
             data_len = self._get_request_response(2)
             msg_len = socket.htons(struct.unpack('H', data_len)[0])
             recvdata = self._get_request_response(msg_len)
             msg = Message(Message.PARSE)
-            msg.from_wire(recvdata)
+            msg.from_wire(recvdata, Message.PRESERVE_ORDER)
 
             # TSIG related checks, including an unexpected signed response
             self._check_response_tsig(msg, recvdata)
@@ -366,12 +906,12 @@ class XfrinConnection(asyncore.dispatcher):
             # Perform response status validation
             self._check_response_status(msg)
 
-            answer_section = msg.get_section(Message.SECTION_ANSWER)
-            for rr in self._handle_answer_section(answer_section):
-                yield rr
+            for rr in msg.get_section(Message.SECTION_ANSWER):
+                rr_handled = False
+                while not rr_handled:
+                    rr_handled = self.__state.handle_rr(self, rr)
 
-            if self._soa_rr_count == 2:
-                break
+            read_next_msg = self.__state.finish_message(self)
 
             if self._shutdown_event.is_set():
                 raise XfrinException('xfrin is forced to stop')
@@ -388,28 +928,100 @@ class XfrinConnection(asyncore.dispatcher):
 
         return False
 
-    def log_info(self, msg, type='info'):
-        # Overwrite the log function, log nothing
-        pass
-
-def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
-                  shutdown_event, master_addrinfo, check_soa, verbose,
-                  tsig_key):
-    xfrin_recorder.increment(zone_name)
-    sock_map = {}
-    conn = XfrinConnection(sock_map, zone_name, rrclass, db_file,
-                           shutdown_event, master_addrinfo,
-                           tsig_key, verbose)
+def __process_xfrin(server, zone_name, rrclass, db_file,
+                    shutdown_event, master_addrinfo, check_soa, tsig_key,
+                    request_type, conn_class):
+    conn = None
+    exception = None
     ret = XFRIN_FAIL
-    if conn.connect_to_master():
-        ret = conn.do_xfrin(check_soa)
+    try:
+        # Create a data source client used in this XFR session.  Right now we
+        # still assume an sqlite3-based data source, and use both the old and new
+        # data source APIs.  We also need to use a mock client for tests.
+        # For a temporary workaround to deal with these situations, we skip the
+        # creation when the given file is None (the test case).  Eventually
+        # this code will be much cleaner.
+        datasrc_client = None
+        if db_file is not None:
+            # temporary hardcoded sqlite initialization. Once we decide on
+            # the config specification, we need to update this (TODO)
+            # this may depend on #1207, or any followup ticket created for #1207
+            datasrc_type = "sqlite3"
+            datasrc_config = "{ \"database_file\": \"" + db_file + "\"}"
+            datasrc_client = DataSourceClient(datasrc_type, datasrc_config)
+
+        # Create a TCP connection for the XFR session and perform the operation.
+        sock_map = {}
+        # In case we were asked to do IXFR and that one fails, we try again with
+        # AXFR. But only if we could actually connect to the server.
+        #
+        # So we start with retry as True, which is set to false on each attempt.
+        # In the case of connected but failed IXFR, we set it to true once again.
+        retry = True
+        while retry:
+            retry = False
+            conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
+                              shutdown_event, master_addrinfo, db_file,
+                              tsig_key)
+            conn.init_socket()
+            ret = XFRIN_FAIL
+            if conn.connect_to_master():
+                ret = conn.do_xfrin(check_soa, request_type)
+                if ret == XFRIN_FAIL and request_type == RRType.IXFR():
+                    # IXFR failed for some reason. It might mean the server can't
+                    # handle it, or we don't have the zone or we are out of sync or
+                    # whatever else. So we retry with AXFR, as it may succeed
+                    # in many such cases.
+                    retry = True
+                    request_type = RRType.AXFR()
+                    logger.warn(XFRIN_XFR_TRANSFER_FALLBACK, conn.zone_str())
+                    conn.close()
+                    conn = None
+
+    except Exception as ex:
+        # If exception happens, just remember it here so that we can re-raise
+        # after cleaning up things.  We don't log it here because we want
+        # to eliminate even the smallest possibility of having an exception
+        # in logging itself.
+        exception = ex
+
+    # asyncore.dispatcher requires explicit close() unless its entire
+    # lifetime, from birth to destruction, is contained within asyncore.loop,
+    # which is not
+    # the case for us.  We always close() here, whether or not do_xfrin
+    # succeeds, and even when we see an unexpected exception.
+    if conn is not None:
+        conn.close()
 
     # Publish the zone transfer result news, so zonemgr can reset the
     # zone timer, and xfrout can notify the zone's slaves if the result
     # is success.
     server.publish_xfrin_news(zone_name, rrclass, ret)
+
+    if exception is not None:
+        raise exception
+
+def process_xfrin(server, xfrin_recorder, zone_name, rrclass, db_file,
+                  shutdown_event, master_addrinfo, check_soa, tsig_key,
+                  request_type, conn_class=XfrinConnection):
+    # Even if it should be rare, the main process of xfrin session can
+    # raise an exception.  In order to make sure the lock in xfrin_recorder
+    # is released in any cases, we delegate the main part to the helper
+    # function in the try block, catch any exceptions, then release the lock.
+    xfrin_recorder.increment(zone_name)
+    exception = None
+    try:
+        __process_xfrin(server, zone_name, rrclass, db_file,
+                        shutdown_event, master_addrinfo, check_soa, tsig_key,
+                        request_type, conn_class)
+    except Exception as ex:
+        # don't log it until we complete decrement().
+        exception = ex
     xfrin_recorder.decrement(zone_name)
 
+    if exception is not None:
+        typestr = "AXFR" if request_type == RRType.AXFR() else "IXFR"
+        logger.error(XFRIN_XFR_PROCESS_FAILURE, typestr, zone_name.to_text(),
+                     str(rrclass), str(exception))
 
 class XfrinRecorder:
     def __init__(self):
@@ -451,6 +1063,7 @@ class ZoneInfo:
         self.set_master_port(config_data.get('master_port'))
         self.set_zone_class(config_data.get('class'))
         self.set_tsig_key(config_data.get('tsig_key'))
+        self.set_use_ixfr(config_data.get('use_ixfr'))
 
     def set_name(self, name_str):
         """Set the name for this zone given a name string.
@@ -525,18 +1138,28 @@ class ZoneInfo:
                 errmsg = "bad TSIG key string: " + tsig_key_str
                 raise XfrinZoneInfoException(errmsg)
 
+    def set_use_ixfr(self, use_ixfr):
+        """Set use_ixfr. If set to True, it will use
+           IXFR for incoming transfers. If set to False, it will use AXFR.
+           At this moment there is no automatic fallback"""
+        # TODO: http://bind10.isc.org/ticket/1279
+        if use_ixfr is None:
+            self.use_ixfr = \
+                self._module_cc.get_default_value("zones/use_ixfr")
+        else:
+            self.use_ixfr = use_ixfr
+
     def get_master_addr_info(self):
         return (self.master_addr.family, socket.SOCK_STREAM,
                 (str(self.master_addr), self.master_port))
 
 class Xfrin:
-    def __init__(self, verbose = False):
+    def __init__(self):
         self._max_transfers_in = 10
         self._zones = {}
         self._cc_setup()
         self.recorder = XfrinRecorder()
         self._shutdown_event = threading.Event()
-        self._verbose = verbose
 
     def _cc_setup(self):
         '''This method is used only as part of initialization, but is
@@ -623,21 +1246,37 @@ class Xfrin:
                 # a security hole. Once we add the ability to have multiple master addresses,
                 # we should check if it matches one of them, and then use it.)
                 (zone_name, rrclass) = self._parse_zone_name_and_class(args)
+                zone_str = format_zone_str(zone_name, rrclass)
                 zone_info = self._get_zone_info(zone_name, rrclass)
+                notify_addr = self._parse_master_and_port(args, zone_name,
+                                                          rrclass)
                 if zone_info is None:
                     # TODO what to do? no info known about zone. defaults?
-                    errmsg = "Got notification to retransfer unknown zone " + zone_name.to_text()
-                    logger.error(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_name.to_text())
+                    errmsg = "Got notification to retransfer unknown zone " + zone_str
+                    logger.info(XFRIN_RETRANSFER_UNKNOWN_ZONE, zone_str)
                     answer = create_answer(1, errmsg)
                 else:
+                    request_type = RRType.AXFR()
+                    if zone_info.use_ixfr:
+                        request_type = RRType.IXFR()
                     master_addr = zone_info.get_master_addr_info()
-                    ret = self.xfrin_start(zone_name,
-                                           rrclass,
-                                           self._get_db_file(),
-                                           master_addr,
-                                           zone_info.tsig_key,
-                                           True)
-                    answer = create_answer(ret[0], ret[1])
+                    if notify_addr[0] == master_addr[0] and\
+                       notify_addr[2] == master_addr[2]:
+                        ret = self.xfrin_start(zone_name,
+                                               rrclass,
+                                               self._get_db_file(),
+                                               master_addr,
+                                               zone_info.tsig_key, request_type,
+                                               True)
+                        answer = create_answer(ret[0], ret[1])
+                    else:
+                        notify_addr_str = format_addrinfo(notify_addr)
+                        master_addr_str = format_addrinfo(master_addr)
+                        errmsg = "Got notification for " + zone_str\
+                               + " from unknown address: " + notify_addr_str
+                        logger.info(XFRIN_NOTIFY_UNKNOWN_MASTER, zone_str,
+                                    notify_addr_str, master_addr_str)
+                        answer = create_answer(1, errmsg)
 
             elif command == 'retransfer' or command == 'refresh':
                 # Xfrin receives the retransfer/refresh from cmdctl(sent by bindctl).
@@ -648,14 +1287,17 @@ class Xfrin:
                                                           rrclass)
                 zone_info = self._get_zone_info(zone_name, rrclass)
                 tsig_key = None
+                request_type = RRType.AXFR()
                 if zone_info:
                     tsig_key = zone_info.tsig_key
+                    if zone_info.use_ixfr:
+                        request_type = RRType.IXFR()
                 db_file = args.get('db_file') or self._get_db_file()
                 ret = self.xfrin_start(zone_name,
                                        rrclass,
                                        db_file,
                                        master_addr,
-                                       tsig_key,
+                                       tsig_key, request_type,
                                        (False if command == 'retransfer' else True))
                 answer = create_answer(ret[0], ret[1])
 
@@ -735,7 +1377,8 @@ class Xfrin:
         news(command: zone_new_data_ready) to zone manager and xfrout.
         if xfrin failed, just tell the bad news to zone manager, so that
         it can reset the refresh timer for that zone. '''
-        param = {'zone_name': zone_name, 'zone_class': zone_class.to_text()}
+        param = {'zone_name': zone_name.to_text(),
+                 'zone_class': zone_class.to_text()}
         if xfr_result == XFRIN_OK:
             msg = create_command(notify_out.ZONE_NEW_DATA_READY_CMD, param)
             # catch the exception, in case msgq has been killed.
@@ -772,8 +1415,8 @@ class Xfrin:
         while not self._shutdown_event.is_set():
             self._cc_check_command()
 
-    def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo, tsig_key,
-                    check_soa = True):
+    def xfrin_start(self, zone_name, rrclass, db_file, master_addrinfo,
+                    tsig_key, request_type, check_soa=True):
         if "pydnspp" not in sys.modules:
             return (1, "xfrin failed, can't load dns message python library: 'pydnspp'")
 
@@ -787,13 +1430,12 @@ class Xfrin:
         xfrin_thread = threading.Thread(target = process_xfrin,
                                         args = (self,
                                                 self.recorder,
-                                                zone_name.to_text(),
+                                                zone_name,
                                                 rrclass,
                                                 db_file,
                                                 self._shutdown_event,
                                                 master_addrinfo, check_soa,
-                                                self._verbose,
-                                                tsig_key))
+                                                tsig_key, request_type))
 
         xfrin_thread.start()
         return (0, 'zone xfrin is started')
@@ -812,9 +1454,9 @@ def set_signal_handler():
 
 def set_cmd_options(parser):
     parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
-            help="display more about what is going on")
+            help="This option is obsolete and has no effect.")
 
-def main(xfrin_class, use_signal = True):
+def main(xfrin_class, use_signal=True):
     """The main loop of the Xfrin daemon.
 
     @param xfrin_class: A class of the Xfrin object.  This is normally Xfrin,
@@ -831,7 +1473,7 @@ def main(xfrin_class, use_signal = True):
 
         if use_signal:
             set_signal_handler()
-        xfrind = xfrin_class(verbose = options.verbose)
+        xfrind = xfrin_class()
         xfrind.startup()
     except KeyboardInterrupt:
         logger.info(XFRIN_STOPPED_BY_KEYBOARD)
diff --git a/src/bin/xfrin/xfrin.spec b/src/bin/xfrin/xfrin.spec
index a3e62ce..c1ba61e 100644
--- a/src/bin/xfrin/xfrin.spec
+++ b/src/bin/xfrin/xfrin.spec
@@ -43,6 +43,11 @@
           { "item_name": "tsig_key",
             "item_type": "string",
             "item_optional": true
+          },
+          { "item_name": "use_ixfr",
+            "item_type": "boolean",
+            "item_optional": false,
+            "item_default": false
           }
           ]
         }
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 80a0be3..5e182d8 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,25 +15,92 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrin messages python module.
 
-% XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
+% XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created
+On starting an xfrin session, it is identified that the zone to be
+transferred is not found in the data source.  This can happen if a
+secondary DNS server first tries to perform AXFR from a primary server
+without creating the zone image beforehand (e.g. by b10-loadzone).  As
+of this writing the xfrin process provides backward compatible
+behavior to previous versions: creating a new one in the data source
+not to surprise existing users too much.  This is probably not a good
+idea, however, in terms of who should be responsible for managing
+zones at a higher level.  In future it is more likely that a separate
+zone management framework is provided, and the situation where the
+given zone isn't found in xfrout will be treated as an error.
+
+% XFRIN_ZONE_NO_SOA Zone %1 does not have SOA
+On starting an xfrin session, it is identified that the zone to be
+transferred does not have an SOA RR in the data source.  This is not
+necessarily an error; if a secondary DNS server first tries to perform
+transfer from a primary server, the zone can be empty, and therefore
+doesn't have an SOA.  Subsequent AXFR will fill in the zone; if the
+attempt is IXFR it will fail in query creation.
+
+% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
+On starting an xfrin session, it is identified that the zone to be
+transferred has multiple SOA RRs.  Such a zone is broken, but could be
+accidentally configured especially in a data source using "non
+captive" backend database.  The implementation ignores entire SOA RRs
+and tries to continue processing as if the zone were empty.  This
+means subsequent AXFR can succeed and possibly replace the zone with
+valid content, but an IXFR attempt will fail.
+
+% XFRIN_ZONE_SERIAL_AHEAD Serial number (%1) for %2 received from master %3 < ours (%4)
+The response to an SOA query prior to xfr indicated that the zone's
+SOA serial at the primary server is smaller than that of the xfrin
+client.  This is not necessarily an error especially if that
+particular primary server is another secondary server which hasn't got
+the latest version of the zone.  But if the primary server is known to
+be the real source of the zone, some unexpected inconsistency may have
+happened, and you may want to take a closer look.  In this case xfrin
+doesn't perform subsequent zone transfer.
+
+% XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module.  Possible reasons are a broken DNS message or failure
+in database connection.  The error is shown in the log message.
+
+% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to a protocol
+error, such as an unexpected response from the primary server.  The
+error is shown in the log message.  It may be because the primary
+server implementation is broken or (although less likely) there was
+some attack attempt, but it can also happen due to configuration
+mismatch such as the remote server does not have authority for the
+zone any more but the local configuration hasn't been updated.  So it
+is recommended to check the primary server configuration.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to an internal error.
 The error is shown in the log message.
 
-% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
-
-% XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a protocol error.
-The error is shown in the log message.
-
-% XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started
+% XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
+The IXFR transfer of the given zone failed. This might happen in many cases,
+such as when the remote server doesn't support IXFR, we don't have the SOA record
+(or the zone at all), we are out of sync, etc. In many of these situations,
+AXFR could still work. Therefore we try that one in case it helps.
+
+% XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general, these errors are not really expected
+ones, and indicate an installation error or a program bug.  The
+session handler thread tries to clean up all intermediate resources
+even on these errors, but it may be incomplete.  So, if this log
+message continuously appears, system resource consumption should be
+checked, and you may even want to disable the corresponding transfers.
+You may also want to file a bug report if this message appears so
+often.
+
+% XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started
 A connection to the master server has been made, the serial value in
 the SOA record has been checked, and a zone transfer has been started.
 
-% XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded
-The AXFR transfer of the given zone was successfully completed.
+% XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded
+The XFR transfer of the given zone was successfully completed.
 
 % XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1
 The given master address is not a valid IP address.
@@ -69,6 +136,12 @@ was killed.
 There was a problem sending a message to the zone manager. This most
 likely means that the msgq daemon has quit or was killed.
 
+% XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
+
 % XFRIN_IMPORT_DNS error importing python DNS module: %1
 There was an error importing the python DNS module pydnspp. The most
 likely cause is a PYTHONPATH problem.
@@ -89,3 +162,42 @@ daemon will now shut down.
 % XFRIN_UNKNOWN_ERROR unknown error: %1
 An uncaught exception was raised while running the xfrin daemon. The
 exception message is printed in the log message.
+
+% XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating
+The first SOA record in an IXFR response indicates the zone's serial
+at the primary server is not newer than the client's.  This is
+basically unexpected event because normally the client first checks
+the SOA serial by an SOA query, but can still happen if the transfer
+is manually invoked or (although unlikely) there is a rapid change at
+the primary server between the SOA and IXFR queries.  The client
+implementation confirms the whole response is this single SOA, and
+aborts the transfer just like a successful case.
+
+% XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
+In an attempt of IXFR processing, the beginning SOA of the first difference
+(following the initial SOA that specified the final SOA for all the
+differences) was found.  This means a connection for xfrin tried IXFR
+and really got a response for incremental updates.
+
+% XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1
+Non incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA.  Non incremental transfer is
+either AXFR or AXFR-style IXFR.  In the latter case, it means that
+in a response to IXFR query the first data is not SOA or its SOA serial
+is not equal to the requested SOA serial.
+
+% XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same.  According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold.  There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems that it would be better to reject the
+transfer if a mismatch is detected.  On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs.  For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found.  If it
+turns out to happen with a real world primary server implementation
+and that server actually feeds broken data (e.g. mixed versions of
+zone), we can consider a stricter action.
diff --git a/src/bin/xfrout/Makefile.am b/src/bin/xfrout/Makefile.am
index c5492ad..6100e64 100644
--- a/src/bin/xfrout/Makefile.am
+++ b/src/bin/xfrout/Makefile.am
@@ -6,9 +6,13 @@ pkglibexec_SCRIPTS = b10-xfrout
 
 b10_xfroutdir = $(pkgdatadir)
 b10_xfrout_DATA = xfrout.spec
-pyexec_DATA = xfrout_messages.py
 
-CLEANFILES=	b10-xfrout xfrout.pyc xfrout.spec xfrout_messages.py xfrout_messages.pyc
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = b10-xfrout xfrout.pyc xfrout.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.pyc
 
 man_MANS = b10-xfrout.8
 EXTRA_DIST = $(man_MANS) b10-xfrout.xml xfrout_messages.mes
@@ -21,14 +25,15 @@ b10-xfrout.8: b10-xfrout.xml
 endif
 
 # Define rule to build logging source files from message file
-xfrout_messages.py: xfrout_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message -p $(top_srcdir)/src/bin/xfrout/xfrout_messages.mes
+$(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py : xfrout_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/xfrout_messages.mes
 
 xfrout.spec: xfrout.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.spec.pre >$@
 
 # this is done here since configure.ac AC_OUTPUT doesn't expand exec_prefix
-b10-xfrout: xfrout.py xfrout_messages.py
+b10-xfrout: xfrout.py $(PYTHON_LOGMSGPKG_DIR)/work/xfrout_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" xfrout.py >$@
 	chmod a+x $@
diff --git a/src/bin/xfrout/b10-xfrout.8 b/src/bin/xfrout/b10-xfrout.8
index c8b4b07..c810c2f 100644
--- a/src/bin/xfrout/b10-xfrout.8
+++ b/src/bin/xfrout/b10-xfrout.8
@@ -71,6 +71,19 @@ The configurable settings are:
 defines the maximum number of outgoing zone transfers that can run concurrently\&. The default is 10\&.
 .PP
 
+\fItsig_key_ring\fR
+A list of TSIG keys (each of which is in the form of name:base64\-key[:algorithm]) used for access control on transfer requests\&. The default is an empty list\&.
+.PP
+
+\fItransfer_acl\fR
+A list of ACL elements that apply to all transfer requests by default (unless overridden in zone_config)\&. See the BIND 10 guide for configuration examples\&. The default is an element that allows any transfer requests\&.
+.PP
+
+\fIzone_config\fR
+A list of JSON objects (i\&.e\&. maps) that define per zone configuration concerning
+\fBb10\-xfrout\fR\&. The supported names of each object are "origin" (the origin name of the zone), "class" (the RR class of the zone, optional, default to "IN"), and "acl_element" (ACL only applicable to transfer requests for that zone)\&. See the BIND 10 guide for configuration examples\&. The default is an empty list, that is, no zone specific configuration\&.
+.PP
+
 \fIlog_name\fR
 .PP
 
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index ad71fe2..4f6a7fa 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -98,6 +98,31 @@
       that can run concurrently. The default is 10.
     </para>
     <para>
+      <varname>tsig_key_ring</varname>
+      A list of TSIG keys (each of which is in the form of
+      name:base64-key[:algorithm]) used for access control on transfer
+      requests.
+      The default is an empty list.
+    </para>
+    <para>
+      <varname>transfer_acl</varname>
+      A list of ACL elements that apply to all transfer requests by
+      default (unless overridden in zone_config).  See the BIND 10
+      guide for configuration examples.
+      The default is an element that allows any transfer requests.
+    </para>
+    <para>
+      <varname>zone_config</varname>
+      A list of JSON objects (i.e. maps) that define per zone
+      configuration concerning <command>b10-xfrout</command>.
+      The supported names of each object are "origin" (the origin
+      name of the zone), "class" (the RR class of the zone, optional,
+      default to "IN"), and "acl_element" (ACL only applicable to
+      transfer requests for that zone).
+      See the BIND 10 guide for configuration examples.
+      The default is an empty list, that is, no zone specific configuration.
+    </para>
+    <para>
       <varname>log_name</varname>
 <!-- TODO -->
     </para>
@@ -134,6 +159,14 @@
       data storage types.
     </simpara></note>
 
+
+<!--
+
+tsig_key_ring list of
+tsig_key string
+
+-->
+
 <!-- TODO: formating -->
     <para>
       The configuration commands are:
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 99f4843..ad6d7e6 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -1,15 +1,24 @@
 PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = xfrout_test.py
-EXTRA_DIST = $(PYTESTS)
+noinst_SCRIPTS = $(PYTESTS)
+
+EXTRA_DIST = testdata/test.sqlite3
+# These are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/creatediff.py
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
+# We set B10_FROM_BUILD below, so that the test can refer to the in-source
+# spec file.
 check-local:
 if ENABLE_PYTHON_COVERAGE
 	touch $(abs_top_srcdir)/.coverage 
@@ -18,7 +27,10 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
+	chmod +x $(abs_builddir)/$$pytest ; \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_builddir)/src/bin/xfrout:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+	TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrout/tests/testdata/creatediff.py b/src/bin/xfrout/tests/testdata/creatediff.py
new file mode 100755
index 0000000..dab6622
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/creatediff.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3.1
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This script was used to create zone differences for IXFR tests.
+
+The result was stored in the test SQLite3 database file, so this script
+itself isn't necessary for testing.  It's provided here for reference
+purposes.
+
+'''
+
+import isc.datasrc
+import isc.log
+from isc.dns import *
+from isc.testutils.rrset_utils import *
+
+isc.log.init("dummy")           # XXX
+
+ZONE_NAME = Name('example.com')
+NS_NAME_STR = 'a.dns.example.com'
+NS_NAME = Name(NS_NAME_STR)
+
+client = isc.datasrc.DataSourceClient('sqlite3',
+                                      '{ "database_file": "test.sqlite3" }')
+
+# Install the initial data
+updater = client.get_updater(ZONE_NAME, True)
+updater.add_rrset(create_soa(2011111802))
+updater.add_rrset(create_ns(NS_NAME_STR))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.add_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.commit()
+
+# Incremental update to generate diffs
+updater = client.get_updater(ZONE_NAME, False, True)
+updater.delete_rrset(create_soa(2011111802))
+updater.add_rrset(create_soa(2011111900))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.2', 7200))
+updater.delete_rrset(create_soa(2011111900))
+updater.delete_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.delete_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.add_rrset(create_soa(2011112001))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.1'))
+updater.commit()
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
new file mode 100644
index 0000000..8458d09
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -0,0 +1,6 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.com.         3600  IN  SOA master.example.com. admin.example.com. 2011112001 3600 1800 2419200 7200
+example.com.         3600  IN  NS  a.dns.example.com.
+a.dns.example.com.   3600  IN  A    192.0.2.1
+a.dns.example.com.   7200  IN  A    192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..9eb14f1
Binary files /dev/null and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index e353a60..ea4de27 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -20,14 +20,27 @@ import unittest
 import os
 from isc.testutils.tsigctx_mock import MockTSIGContext
 from isc.cc.session import *
-from pydnspp import *
+import isc.config
+from isc.dns import *
+from isc.testutils.rrset_utils import *
 from xfrout import *
 import xfrout
 import isc.log
 import isc.acl.dns
 
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
 
+#
+# Commonly used (mostly constant) test parameters
+#
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
+TEST_RRCLASS = RRClass.IN()
+IXFR_OK_VERSION = 2011111802
+IXFR_NG_VERSION = 2011112800
+SOA_CURRENT_VERSION = 2011112001
+
 # our fake socket, where we can read and insert messages
 class MySocket():
     def __init__(self, family, type):
@@ -54,19 +67,99 @@ class MySocket():
         self.sendqueue = self.sendqueue[size:]
         return result
 
-    def read_msg(self):
+    def read_msg(self, parse_options=Message.PARSE_DEFAULT, need_len=False):
         sent_data = self.readsent()
         get_msg = Message(Message.PARSE)
-        get_msg.from_wire(bytes(sent_data[2:]))
+        get_msg.from_wire(bytes(sent_data[2:]), parse_options)
+        if need_len:
+            return (get_msg, len(sent_data) - 2)
         return get_msg
 
     def clear_send(self):
         del self.sendqueue[:]
 
-# We subclass the Session class we're testing here, only
-# to override the handle() and _send_data() method
+class MockDataSrcClient:
+    def __init__(self, type, config):
+        pass
+
+    def find_zone(self, zone_name):
+        '''Mock version of find_zone().
+
+        It returns itself (subsequently acting as a mock ZoneFinder) for
+        some test zone names.  For a special name it returns NOTFOUND to
+        emulate the condition where the specified zone doesn't exist.
+
+        '''
+        self._zone_name = zone_name
+        if zone_name == Name('notauth.example.com'):
+            return (isc.datasrc.DataSourceClient.NOTFOUND, None)
+        return (isc.datasrc.DataSourceClient.SUCCESS, self)
+
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
+        '''Mock ZoneFinder.find().
+
+        (At the moment) this method only handles query for type SOA.
+        By default it returns a normal SOA RR(set) whose owner name is
+        the query name.  It also emulates some unusual cases for special
+        zone names.
+
+        '''
+        if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+            return (ZoneFinder.NXDOMAIN, None)
+        elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+            soa_rrset = create_soa(SOA_CURRENT_VERSION)
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+            return (ZoneFinder.SUCCESS, soa_rrset)
+        elif rrtype == RRType.SOA():
+            return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION))
+        raise ValueError('Unexpected input to mock finder: bug in test case?')
+
+    def get_iterator(self, zone_name, adjust_ttl=False):
+        if zone_name == Name('notauth.example.com'):
+            raise isc.datasrc.Error('no such zone')
+        self._zone_name = zone_name
+        return self
+
+    def get_soa(self):  # emulate ZoneIterator.get_soa()
+        if self._zone_name == Name('nosoa.example.com'):
+            return None
+        soa_rrset = create_soa(SOA_CURRENT_VERSION)
+        if self._zone_name == Name('multisoa.example.com'):
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+        return soa_rrset
+
+    def get_journal_reader(self, zone_name, begin_serial, end_serial):
+        if zone_name == Name('notauth2.example.com'):
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_ZONE, None
+        if zone_name == Name('nojournal.example.com'):
+            raise isc.datasrc.NotImplemented('journaling not supported')
+        if begin_serial == IXFR_NG_VERSION:
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION, None
+        return isc.datasrc.ZoneJournalReader.SUCCESS, self
+
+class MyCCSession(isc.config.ConfigData):
+    def __init__(self):
+        module_spec = isc.config.module_spec_from_file(
+            xfrout.SPECFILE_LOCATION)
+        ConfigData.__init__(self, module_spec)
+
+    def get_remote_config_value(self, module_name, identifier):
+        if module_name == "Auth" and identifier == "database_file":
+            return "initdb.file", False
+        else:
+            return "unknown", False
+
+# This constant dictionary stores all default configuration parameters
+# defined in the xfrout spec file.
+DEFAULT_CONFIG = MyCCSession().get_full_config()
+
+# We subclass the Session class we're testing here, only overriding a few
+# methods
 class MyXfroutSession(XfroutSession):
-    def handle(self):
+    def _handle(self):
+        pass
+
+    def _close_socket(self):
         pass
 
     def _send_data(self, sock, data):
@@ -79,12 +172,23 @@ class MyXfroutSession(XfroutSession):
 class Dbserver:
     def __init__(self):
         self._shutdown_event = threading.Event()
+        self.transfer_counter = 0
+        self._max_transfers_out = DEFAULT_CONFIG['transfers_out']
     def get_db_file(self):
-        return None
+        return 'test.sqlite3'
+    def increase_transfers_counter(self):
+        self.transfer_counter += 1
+        return True
     def decrease_transfers_counter(self):
-        pass
+        self.transfer_counter -= 1
+
+class TestXfroutSessionBase(unittest.TestCase):
+    '''Base classs for tests related to xfrout sessions
 
-class TestXfroutSession(unittest.TestCase):
+    This class defines common setup/teardown and utility methods.  Actual
+    tests are delegated to subclasses.
+
+    '''
     def getmsg(self):
         msg = Message(Message.PARSE)
         msg.from_wire(self.mdata)
@@ -101,49 +205,160 @@ class TestXfroutSession(unittest.TestCase):
     def message_has_tsig(self, msg):
         return msg.get_tsig_record() is not None
 
-    def create_request_data_with_tsig(self):
+    def create_request_data(self, with_question=True, with_tsig=False,
+                            ixfr=None, qtype=None, zone_name=TEST_ZONE_NAME,
+                            soa_class=TEST_RRCLASS, num_soa=1):
+        '''Create a commonly used XFR request data.
+
+        By default the request type is AXFR; if 'ixfr' is an integer,
+        the request type will be IXFR and an SOA with the serial being
+        the value of the parameter will be included in the authority
+        section.
+
+        This method has various minor parameters only for creating bad
+        format requests for testing purposes:
+        qtype: the RR type of the question section.  By default automatically
+               determined by the value of ixfr, but could be an invalid type
+               for testing.
+        zone_name: the query (zone) name.  for IXFR, it's also used as
+                   the owner name of the SOA in the authority section.
+        soa_class: IXFR only.  The RR class of the SOA RR in the authority
+                   section.
+        num_soa: IXFR only.  The number of SOA RDATAs in the authority
+                 section.
+        '''
         msg = Message(Message.RENDER)
         query_id = 0x1035
         msg.set_qid(query_id)
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
-        query_question = Question(Name("example.com."), RRClass.IN(), RRType.AXFR())
-        msg.add_question(query_question)
+        req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
+        if with_question:
+            msg.add_question(Question(zone_name, RRClass.IN(),
+                                      req_type if qtype is None else qtype))
+        if req_type == RRType.IXFR():
+            soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+            # In the RDATA only the serial matters.
+            for i in range(0, num_soa):
+                soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+                                    'm r ' + str(ixfr) + ' 1 1 1 1'))
+            msg.add_rrset(Message.SECTION_AUTHORITY, soa)
 
         renderer = MessageRenderer()
-        tsig_ctx = MockTSIGContext(TSIG_KEY)
-        msg.to_wire(renderer, tsig_ctx)
-        reply_data = renderer.get_data()
-        return reply_data
+        if with_tsig:
+            tsig_ctx = MockTSIGContext(TSIG_KEY)
+            msg.to_wire(renderer, tsig_ctx)
+        else:
+            msg.to_wire(renderer)
+        request_data = renderer.get_data()
+        return request_data
+
+    def set_request_type(self, type):
+        self.xfrsess._request_type = type
+        if type == RRType.AXFR():
+            self.xfrsess._request_typestr = 'AXFR'
+        else:
+            self.xfrsess._request_typestr = 'IXFR'
 
     def setUp(self):
         self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
-                                       TSIGKeyRing(), ('127.0.0.1', 12345),
+                                       TSIGKeyRing(),
+                                       (socket.AF_INET, socket.SOCK_STREAM,
+                                        ('127.0.0.1', 12345)),
                                        # When not testing ACLs, simply accept
                                        isc.acl.dns.REQUEST_LOADER.load(
-                                           [{"action": "ACCEPT"}]))
-        self.mdata = bytes(b'\xd6=\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x07example\x03com\x00\x00\xfc\x00\x01')
-        self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
+                                           [{"action": "ACCEPT"}]),
+                                       {})
+        self.set_request_type(RRType.AXFR()) # test AXFR by default
+        self.mdata = self.create_request_data()
+        self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
+        # some test replaces a module-wide function.  We should ensure the
+        # original is used elsewhere.
+        self.orig_get_rrset_len = xfrout.get_rrset_len
+
+    def tearDown(self):
+        xfrout.get_rrset_len = self.orig_get_rrset_len
+        # transfer_counter must always be reset no matter what happens within
+        # the XfroutSession object.  We check the condition here.
+        self.assertEqual(0, self.xfrsess._server.transfer_counter)
+
+class TestXfroutSession(TestXfroutSessionBase):
+    def test_quota_error(self):
+        '''Emulating the server being too busy.
+
+        '''
+        self.xfrsess._request_data = self.mdata
+        self.xfrsess._server.increase_transfers_counter = lambda : False
+        XfroutSession._handle(self.xfrsess)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+
+    def test_quota_ok(self):
+        '''The default case in terms of the xfrout quota.
+
+        '''
+        # set up a bogus request, which should result in FORMERR. (it only
+        # has to be something that is different from the previous case)
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_OK_VERSION, num_soa=2)
+        # Replace the data source client to avoid datasrc related exceptions
+        self.xfrsess.ClientClass = MockDataSrcClient
+        XfroutSession._handle(self.xfrsess)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+
+    def test_exception_from_session(self):
+        '''Test the case where the main processing raises an exception.
+
+        We just check it doesn't cause any unexpected disruption and (in tearDown)
+        transfer_counter is correctly reset to 0.
+
+        '''
+        def dns_xfrout_start(fd, msg, quota):
+            raise ValueError('fake exception')
+        self.xfrsess.dns_xfrout_start = dns_xfrout_start
+        XfroutSession._handle(self.xfrsess)
 
     def test_parse_query_message(self):
+        # Valid AXFR
         [get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
         self.assertEqual(get_rcode.to_text(), "NOERROR")
 
+        # Valid IXFR
+        request_data = self.create_request_data(ixfr=2011111801)
+        rcode, msg = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
+        self.assertEqual(Rcode.NOERROR(), rcode)
+
+        # Broken request: no question
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(with_question=False))
+
+        # Broken request: invalid RR type (neither AXFR nor IXFR)
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(qtype=RRType.A()))
+
+        # NOERROR
+        request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        rcode, msg = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(rcode.to_text(), "NOERROR")
+
         # tsig signed query message
-        request_data = self.create_request_data_with_tsig()
+        request_data = self.create_request_data(with_tsig=True)
         # BADKEY
         [rcode, msg] = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOTAUTH")
         self.assertTrue(self.xfrsess._tsig_ctx is not None)
         # NOERROR
-        self.xfrsess._tsig_key_ring.add(TSIG_KEY)
+        self.assertEqual(TSIGKeyRing.SUCCESS,
+                         self.xfrsess._tsig_key_ring.add(TSIG_KEY))
         [rcode, msg] = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOERROR")
         self.assertTrue(self.xfrsess._tsig_ctx is not None)
 
+    def check_transfer_acl(self, acl_setter):
         # ACL checks, put some ACL inside
-        self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+        acl_setter(isc.acl.dns.REQUEST_LOADER.load([
             {
                 "from": "127.0.0.1",
                 "action": "ACCEPT"
@@ -152,29 +367,141 @@ class TestXfroutSession(unittest.TestCase):
                 "from": "192.0.2.1",
                 "action": "DROP"
             }
-        ])
+        ]))
         # Localhost (the default in this test) is accepted
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "NOERROR")
         # This should be dropped completely, therefore returning None
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(None, rcode)
         # This should be refused, therefore REFUSED
-        self.xfrsess._remote = ('192.0.2.2', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "REFUSED")
+
+        # TSIG signed request
+        request_data = self.create_request_data(with_tsig=True)
+
         # If the TSIG check fails, it should not check ACL
         # (If it checked ACL as well, it would just drop the request)
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         self.xfrsess._tsig_key_ring = TSIGKeyRing()
         rcode, msg = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOTAUTH")
         self.assertTrue(self.xfrsess._tsig_ctx is not None)
 
-    def test_get_query_zone_name(self):
-        msg = self.getmsg()
-        self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
+        # ACL using TSIG: successful case
+        acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+            {"key": "example.com", "action": "ACCEPT"}, {"action": "REJECT"}
+        ]))
+        self.assertEqual(TSIGKeyRing.SUCCESS,
+                         self.xfrsess._tsig_key_ring.add(TSIG_KEY))
+        [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(rcode.to_text(), "NOERROR")
+
+        # ACL using TSIG: key name doesn't match; should be rejected
+        acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+            {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+        ]))
+        [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(rcode.to_text(), "REFUSED")
+
+        # ACL using TSIG: no TSIG; should be rejected
+        acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+            {"key": "example.org", "action": "ACCEPT"}, {"action": "REJECT"}
+        ]))
+        [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(rcode.to_text(), "REFUSED")
+
+        #
+        # ACL using IP + TSIG: both should match
+        #
+        acl_setter(isc.acl.dns.REQUEST_LOADER.load([
+                {"ALL": [{"key": "example.com"}, {"from": "192.0.2.1"}],
+                 "action": "ACCEPT"},
+                {"action": "REJECT"}
+        ]))
+        # both matches
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
+        [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(rcode.to_text(), "NOERROR")
+        # TSIG matches, but address doesn't
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
+        [rcode, msg] = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(rcode.to_text(), "REFUSED")
+        # Address matches, but TSIG doesn't (not included)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
+        [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(rcode.to_text(), "REFUSED")
+        # Neither address nor TSIG matches
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
+        [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(rcode.to_text(), "REFUSED")
+
+    def test_transfer_acl(self):
+        # ACL checks only with the default ACL
+        def acl_setter(acl):
+            self.xfrsess._acl = acl
+        self.check_transfer_acl(acl_setter)
+
+    def test_transfer_zoneacl(self):
+        # ACL check with a per zone ACL + default ACL.  The per zone ACL
+        # should match the queried zone, so it should be used.
+        def acl_setter(acl):
+            zone_key = ('IN', 'example.com.')
+            self.xfrsess._zone_config[zone_key] = {}
+            self.xfrsess._zone_config[zone_key]['transfer_acl'] = acl
+            self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+                    {"from": "127.0.0.1", "action": "DROP"}])
+        self.check_transfer_acl(acl_setter)
+
+    def test_transfer_zoneacl_nomatch(self):
+        # similar to the previous one, but the per-zone ACL doesn't match the
+        # query.  The default should be used.
+        def acl_setter(acl):
+            zone_key = ('IN', 'example.org.')
+            self.xfrsess._zone_config[zone_key] = {}
+            self.xfrsess._zone_config[zone_key]['transfer_acl'] = \
+                isc.acl.dns.REQUEST_LOADER.load([
+                    {"from": "127.0.0.1", "action": "DROP"}])
+            self.xfrsess._acl = acl
+        self.check_transfer_acl(acl_setter)
+
+    def test_get_transfer_acl(self):
+        # set the default ACL.  If there's no specific zone ACL, this one
+        # should be used.
+        self.xfrsess._acl = isc.acl.dns.REQUEST_LOADER.load([
+                {"from": "127.0.0.1", "action": "ACCEPT"}])
+        acl = self.xfrsess._get_transfer_acl(Name('example.com'), RRClass.IN())
+        self.assertEqual(acl, self.xfrsess._acl)
+
+        # install a per zone config with transfer ACL for example.com.  Then
+        # that ACL will be used for example.com; for others the default ACL
+        # will still be used.
+        com_acl = isc.acl.dns.REQUEST_LOADER.load([
+                {"from": "127.0.0.1", "action": "REJECT"}])
+        self.xfrsess._zone_config[('IN', 'example.com.')] = {}
+        self.xfrsess._zone_config[('IN', 'example.com.')]['transfer_acl'] = \
+            com_acl
+        self.assertEqual(com_acl,
+                         self.xfrsess._get_transfer_acl(Name('example.com'),
+                                                        RRClass.IN()))
+        self.assertEqual(self.xfrsess._acl,
+                         self.xfrsess._get_transfer_acl(Name('example.org'),
+                                                        RRClass.IN()))
+
+        # Name matching should be case insensitive.
+        self.assertEqual(com_acl,
+                         self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
+                                                        RRClass.IN()))
 
     def test_send_data(self):
         self.xfrsess._send_data(self.sock, self.mdata)
@@ -198,10 +525,13 @@ class TestXfroutSession(unittest.TestCase):
     def test_send_message(self):
         msg = self.getmsg()
         msg.make_response()
-        # soa record data with different cases
-        soa_record = (4, 3, 'Example.com.', 'com.Example.', 3600, 'SOA', None, 'master.Example.com. admin.exAmple.com. 1234 3600 1800 2419200 7200')
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(soa_record)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        # SOA record data with different cases
+        soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+                               RRTTL(3600))
+        soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                                  'master.Example.com. admin.exAmple.com. ' +
+                                  '2011112001 3600 1800 2419200 7200'))
+        msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
         self.xfrsess._send_message(self.sock, msg)
         send_out_data = self.sock.readsent()[2:]
 
@@ -230,61 +560,44 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(msg.get_rcode(), rcode)
         self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
 
-    def test_create_rrset_from_db_record(self):
-        rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-        self.assertEqual(rrset.get_name().to_text(), "example.com.")
-        self.assertEqual(rrset.get_class(), RRClass("IN"))
-        self.assertEqual(rrset.get_type().to_text(), "SOA")
-        rdata = rrset.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
-
     def test_send_message_with_last_soa(self):
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, packet_neet_not_sign)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, section.ANSWER())
-        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]#answer_rrset_iter.get_rrset()
+        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+        self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, TSIG_SIGN_EVERY_NTH)
+        # Sending the message with last soa together
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
     def test_send_message_with_last_soa_with_tsig(self):
         # create tsig context
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
 
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
-        # msg is not the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, packet_neet_not_sign)
+        # Sending the message with last soa together
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
 
@@ -292,33 +605,25 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, TSIG_SIGN_EVERY_NTH)
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-
     def test_trigger_send_message_with_last_soa(self):
         rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
         rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
 
         msg = self.getmsg()
         msg.make_response()
         msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
 
         # length larger than MAX-len(rrset)
-        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+            get_rrset_len(self.soa_rrset) + 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
         # (1 with the rrset we added manually, and 1 that triggered
         # the sending in _with_last_soa)
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
-                                                 packet_neet_not_sign)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset,
+                                                 length_need_split)
         get_msg = self.sock.read_msg()
         self.assertFalse(self.message_has_tsig(get_msg))
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
@@ -338,100 +643,139 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, Message.SECTION_ANSWER)
         answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+        self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_trigger_send_message_with_last_soa_with_tsig(self):
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        msg.add_rrset(Message.SECTION_ANSWER, self.soa_rrset)
 
         # length larger than MAX-len(rrset)
-        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
+        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+            get_rrset_len(self.soa_rrset) + 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
         # (1 with the rrset we added manually, and 1 that triggered
         # the sending in _with_last_soa)
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
-                                                 packet_neet_not_sign)
-        get_msg = self.sock.read_msg()
-        # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
-        self.assertFalse(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-        # and it should not have sent anything else
-        self.assertEqual(0, len(self.sock.sendqueue))
-
-
-        # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
-                                                 xfrout.TSIG_SIGN_EVERY_NTH)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset,
+                                                 length_need_split)
+        # Both messages should have TSIG RRs
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_get_rrset_len(self):
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-        self.assertEqual(82, get_rrset_len(rrset_soa))
-
-    def test_zone_has_soa(self):
-        global sqlite3_ds
-        def mydb1(zone, file):
-            return True
-        sqlite3_ds.get_zone_soa = mydb1
-        self.assertTrue(self.xfrsess._zone_has_soa(""))
-        def mydb2(zone, file):
-            return False
-        sqlite3_ds.get_zone_soa = mydb2
-        self.assertFalse(self.xfrsess._zone_has_soa(""))
-
-    def test_zone_exist(self):
-        global sqlite3_ds
-        def zone_exist(zone, file):
-            return zone
-        sqlite3_ds.zone_exist = zone_exist
-        self.assertTrue(self.xfrsess._zone_exist(True))
-        self.assertFalse(self.xfrsess._zone_exist(False))
-
-    def test_check_xfrout_available(self):
-        def zone_exist(zone):
-            return zone
-        def zone_has_soa(zone):
-            return (not zone)
-        self.xfrsess._zone_exist = zone_exist
-        self.xfrsess._zone_has_soa = zone_has_soa
-        self.assertEqual(self.xfrsess._check_xfrout_available(False).to_text(), "NOTAUTH")
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "SERVFAIL")
-
-        def zone_empty(zone):
-            return zone
-        self.xfrsess._zone_has_soa = zone_empty
-        def false_func():
-            return False
-        self.xfrsess._server.increase_transfers_counter = false_func
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "REFUSED")
-        def true_func():
-            return True
-        self.xfrsess._server.increase_transfers_counter = true_func
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "NOERROR")
+        self.assertEqual(82, get_rrset_len(self.soa_rrset))
+
+    def test_xfrout_axfr_setup(self):
+        self.xfrsess.ClientClass = MockDataSrcClient
+        # Successful case.  A zone iterator should be set up.
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
+                         Rcode.NOTAUTH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+
+    def test_xfrout_ixfr_setup(self):
+        self.xfrsess.ClientClass = MockDataSrcClient
+        self.set_request_type(RRType.IXFR())
+
+        # Successful case of pure IXFR.  A zone journal reader should be set
+        # up.
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but as a result of falling back to AXFR-style
+        # IXFR.  A zone iterator should be set up instead of a journal reader.
+        self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but the requested SOA serial is equal to that of
+        # the local SOA.  Both iterator and jnl_reader should be None,
+        # indicating that the response will contain just one SOA.
+        self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # The data source doesn't support journaling.  Should fallback to AXFR.
+        zone_name = Name('nojournal.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        zone_name = Name('notauth.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        # this is a strange case: zone's SOA will be found but the journal
+        # reader won't be created due to 'no such zone'.
+        zone_name = Name('notauth2.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        zone_name = Name('nosoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+        zone_name = Name('multisoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+
+        # query name doesn't match the SOA's owner
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # query's RR class doesn't match the SOA's class
+        zone_name = TEST_ZONE_NAME # make sure the name matches this time
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              soa_class=RRClass.CH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # multiple SOA RRs
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              num_soa=2)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
 
     def test_dns_xfrout_start_formerror(self):
         # formerror
@@ -439,107 +783,343 @@ class TestXfroutSession(unittest.TestCase):
         sent_data = self.sock.readsent()
         self.assertEqual(len(sent_data), 0)
 
-    def default(self, param):
-        return "example.com"
-
     def test_dns_xfrout_start_notauth(self):
-        self.xfrsess._get_query_zone_name = self.default
-        def notauth(formpara):
+        def notauth(msg, name, rrclass):
             return Rcode.NOTAUTH()
-        self.xfrsess._check_xfrout_available = notauth
+        self.xfrsess._xfrout_setup = notauth
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         get_msg = self.sock.read_msg()
         self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
 
+    def test_dns_xfrout_start_datasrc_servfail(self):
+        def internal_raise(x, y):
+            raise isc.datasrc.Error('exception for the sake of test')
+        self.xfrsess.ClientClass = internal_raise
+        self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+
     def test_dns_xfrout_start_noerror(self):
-        self.xfrsess._get_query_zone_name = self.default
-        def noerror(form):
+        def noerror(msg, name, rrclass):
             return Rcode.NOERROR()
-        self.xfrsess._check_xfrout_available = noerror
+        self.xfrsess._xfrout_setup = noerror
 
-        def myreply(msg, sock, zonename):
+        def myreply(msg, sock):
             self.sock.send(b"success")
 
         self.xfrsess._reply_xfrout_query = myreply
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         self.assertEqual(self.sock.readsent(), b"success")
 
-    def test_reply_xfrout_query_noerror(self):
-        global sqlite3_ds
-        def get_zone_soa(zonename, file):
-            return self.soa_record
-
-        def get_zone_datas(zone, file):
-            return [self.soa_record]
-
-        sqlite3_ds.get_zone_soa = get_zone_soa
-        sqlite3_ds.get_zone_datas = get_zone_datas
-        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+    def test_reply_xfrout_query_axfr(self):
+        self.xfrsess._soa = self.soa_rrset
+        self.xfrsess._iterator = [self.soa_rrset]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
         reply_msg = self.sock.read_msg()
         self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
 
-    def test_reply_xfrout_query_noerror_with_tsig(self):
-        rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
-        global sqlite3_ds
+    def test_reply_xfrout_query_axfr_with_tsig(self):
+        rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+                      RRTTL(3600))
+        rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
         global xfrout
-        def get_zone_soa(zonename, file):
-            return self.soa_record
-
-        def get_zone_datas(zone, file):
-            zone_rrsets = []
-            for i in range(0, 100):
-                zone_rrsets.insert(i, rrset_data)
-            return zone_rrsets
 
         def get_rrset_len(rrset):
             return 65520
 
-        sqlite3_ds.get_zone_soa = get_zone_soa
-        sqlite3_ds.get_zone_datas = get_zone_datas
+        self.xfrsess._soa = self.soa_rrset
+        self.xfrsess._iterator = [rrset for i in range(0, 100)]
         xfrout.get_rrset_len = get_rrset_len
 
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
-        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
 
-        # tsig signed first package
-        reply_msg = self.sock.read_msg()
-        self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
-        self.assertTrue(self.message_has_tsig(reply_msg))
-        # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
-        for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
+        # All messages must have TSIG as we don't support the feature of
+        # skipping intermediate TSIG records (with bulk signing).
+        for i in range(0, 102): # 102 = all 100 RRs from iterator and 2 SOAs
             reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # TSIG_SIGN_EVERY_NTH packet has tsig
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
-
-        for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
-            reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # tsig signed last package
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
+            # With the hack of get_rrset_len() above, every message must have
+            # exactly one RR in the answer section.
+            self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+            self.assertTrue(self.message_has_tsig(reply_msg))
 
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
-class MyCCSession():
-    def __init__(self):
-        pass
+    def test_reply_xfrout_query_ixfr(self):
+        # Creating a pure (incremental) IXFR response.  Intermediate SOA
+        # RRs won't be skipped.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = [create_soa(IXFR_OK_VERSION),
+                                  create_a(Name('a.example.com'), '192.0.2.2'),
+                                  create_soa(SOA_CURRENT_VERSION),
+                                  create_aaaa(Name('a.example.com'),
+                                              '2001:db8::1')]
+        self.xfrsess._jnl_reader = self.xfrsess._iterator
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        actual_records = reply_msg.get_section(Message.SECTION_ANSWER)
+
+        expected_records = self.xfrsess._iterator[:]
+        expected_records.insert(0, create_soa(SOA_CURRENT_VERSION))
+        expected_records.append(create_soa(SOA_CURRENT_VERSION))
+
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_reply_xfrout_query_axfr_maxlen(self):
+        # The test RR(set) has the length of 65535 - 12 (size of hdr) bytes:
+        # owner name = 1 (root), fixed fields (type,class,TTL,RDLEN) = 10
+        # RDATA = 65512 (= 65535 - 12 - 1 - 10)
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65512)
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        # The first message should contain the beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The second message should contain the beginning SOA, and only that RR
+        # The wire format data should have the possible maximum size.
+        r, rlen = self.sock.read_msg(need_len=True)
+        self.assertEqual(65535, rlen)
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(test_rr,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The third message should contain the ending SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
 
-    def get_remote_config_value(self, module_name, identifier):
-        if module_name == "Auth" and identifier == "database_file":
-            return "initdb.file", False
-        else:
-            return "unknown", False
+    def maxlen_test_common_setup(self, tsig=False):
+        '''Common initialization for some of the tests below
+
+        For those tests we use '.' for all owner names and names in RDATA
+        to avoid having unexpected results due to compression.  It returns
+        the created SOA for convenience.
+
+        If tsig is True, also setup TSIG (mock) context.  In our test cases
+        the size of the TSIG RR is 81 bytes (key name = example.com,
+        algorithm = hmac-md5)
+
+        '''
+        soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+        self.mdata = self.create_request_data(zone_name=Name('.'))
+        self.xfrsess._soa = soa
+        if tsig:
+            self.xfrsess._tsig_ctx = \
+                self.create_mock_tsig_ctx(TSIGError.NOERROR)
+            self.xfrsess._tsig_len = 81
+        return soa
+
+    def maxlen_test_common_checks(self, soa_rr, test_rr, expected_n_rr):
+        '''A set of common assertion checks for some tests below.
+
+        In all cases two AXFR response messages should have been created.
+        expected_n_rr is a list of two elements, each specifies the expected
+        number of answer RRs for each message: expected_n_rr[0] is the expected
+        number of the first answer RRs; expected_n_rr[1] is the expected number
+        of the second answer RRs.  The message that contains two RRs should
+        have the maximum possible wire length (65535 bytes).  And, in all
+        cases, the resulting RRs should be in the order of SOA, another RR,
+        SOA.
+
+        '''
+        # Check the first message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[0] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[0],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs = r.get_section(Message.SECTION_ANSWER)[:]
+
+        # Check the second message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[1] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[1],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs.extend(r.get_section(Message.SECTION_ANSWER))
+        for (expected_rr, actual_rr) in zip([soa_rr, test_rr, soa_rr],
+                                            actual_rrs):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa(self):
+        # Similar to the 'maxlen' test, but the first message should be
+        # able to contain both SOA and the large RR.
+        soa = self.maxlen_test_common_setup()
+
+        # The first message will contain the question (5 bytes), so the
+        # test RDATA should allow a room for that.
+        test_rr = create_generic(Name('.'), 65512 - 5 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa_with_tsig(self):
+        # Similar to the previous case, but with TSIG (whose size is 81 bytes).
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 5 - 81 -
+                                 get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa(self):
+        # Similar to the max w/ soa test, but the first message cannot contain
+        # both SOA and the long RR due to the question section.  The second
+        # message should be able to contain both.
+        soa = self.maxlen_test_common_setup()
+        test_rr = create_generic(Name('.'), 65512 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa_with_tsig(self):
+        # Similar to the previous case, but with TSIG.
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 81 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_toobigdata(self):
+        # Similar to the 'maxlen' test, but the RR doesn't even fit in a
+        # single message.
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65513) # 1 byte larger than 'max'
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        # the reply method should fail with exception
+        self.assertRaises(XfroutSessionError, self.xfrsess._reply_xfrout_query,
+                          self.getmsg(), self.sock)
+        # The first message should still have been sent and contain the
+        # beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # And there should have been no other messages sent
+        self.assertEqual(0, len(self.sock.sendqueue))
 
+    def test_reply_xfrout_query_ixfr_soa_only(self):
+        # Creating an IXFR response that contains only one RR, which is the
+        # SOA of the current version.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = None
+        self.xfrsess._jnl_reader = None
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        answer = reply_msg.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answer))
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answer[0]))
+
+class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
+    '''Tests for XFR-out sessions using an SQLite3 DB.
+
+    These are provided mainly to confirm the implementation actually works
+    in an environment closer to actual operational environments.  So we
+    only check a few common cases; other details are tested using mock
+    data sources.
+
+    '''
+    def setUp(self):
+        super().setUp()
+        self.xfrsess._request_data = self.mdata
+        self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
+            'test.sqlite3'
+        self.ns_name = 'a.dns.example.com'
+
+    def check_axfr_stream(self, response):
+        '''Common checks for AXFR(-style) response for the test zone.
+        '''
+        # This zone contains two A RRs for the same name with different TTLs.
+        # These TTLs should be preserved in the AXFR stream.
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(5, len(actual_records))
+        # The first and last RR should be the expected SOA
+        expected_soa = create_soa(2011112001)
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[0]))
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[-1]))
+
+        # The ordering of the intermediate RRs can differ depending on the
+        # internal details of the SQLite3 library, so we sort them by a simple
+        # rule sufficient for the purpose here, and then compare them.
+        expected_others = [create_ns(self.ns_name),
+                           create_a(Name(self.ns_name), '192.0.2.1', 3600),
+                           create_a(Name(self.ns_name), '192.0.2.2', 7200)]
+        keyfn = lambda x: (x.get_type(), x.get_ttl())
+        for (expected_rr, actual_rr) in zip(sorted(expected_others, key=keyfn),
+                                            sorted(actual_records[1:4],
+                                                   key=keyfn)):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_axfr_normal_session(self):
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+        self.check_axfr_stream(response)
+
+    def test_ixfr_to_axfr(self):
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_NG_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+        # This is an AXFR-style IXFR.  So the question section should indicate
+        # that it's an IXFR response.
+        self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+        self.check_axfr_stream(response)
+
+    def test_ixfr_normal_session(self):
+        # See testdata/creatediff.py.  There are 8 changes between two
+        # versions.  So the answer section should contain all of these and
+        # two beginning and trailing SOAs.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_OK_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        expected_records = [create_soa(2011112001), create_soa(2011111802),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.53'),
+                            create_aaaa(Name(self.ns_name), '2001:db8::1'),
+                            create_soa(2011112001),
+                            create_a(Name(self.ns_name), '192.0.2.1'),
+                            create_soa(2011112001)]
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_ixfr_soa_only(self):
+        # The requested SOA serial is the latest one.  The response should
+        # contain exactly one SOA of that serial.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        answers = response.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answers))
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answers[0]))
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):
         self._shutdown_event = threading.Event()
-        self._max_transfers_out = 10
-        self._cc = MyCCSession()
         self._common_init()
+        self._cc = MyCCSession()
+        self.update_config_data(self._cc.get_full_config())
 
 class TestUnixSockServer(unittest.TestCase):
     def setUp(self):
@@ -551,23 +1131,27 @@ class TestUnixSockServer(unittest.TestCase):
            file descriptor. This is needed, because we get only that one
            from auth."""
         # We test with UDP, as it can be "connected" without other
-        # endpoint
+        # endpoint.  Note that in the current implementation _guess_remote()
+        # unconditionally returns SOCK_STREAM.
         sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         sock.connect(('127.0.0.1', 12345))
-        self.assertEqual(('127.0.0.1', 12345),
+        self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+                          ('127.0.0.1', 12345)),
                          self.unix._guess_remote(sock.fileno()))
         if socket.has_ipv6:
             # Don't check IPv6 address on hosts not supporting them
             sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
             sock.connect(('::1', 12345))
-            self.assertEqual(('::1', 12345, 0, 0),
+            self.assertEqual((socket.AF_INET6, socket.SOCK_STREAM,
+                              ('::1', 12345, 0, 0)),
                              self.unix._guess_remote(sock.fileno()))
             # Try when pretending there's no IPv6 support
             # (No need to pretend when there's really no IPv6)
             xfrout.socket.has_ipv6 = False
             sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
             sock.connect(('127.0.0.1', 12345))
-            self.assertEqual(('127.0.0.1', 12345),
+            self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+                              ('127.0.0.1', 12345)),
                              self.unix._guess_remote(sock.fileno()))
             # Return it back
             xfrout.socket.has_ipv6 = True
@@ -587,17 +1171,17 @@ class TestUnixSockServer(unittest.TestCase):
                                              socket.AI_NUMERICHOST)[0][4])
         self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
 
-    def check_loaded_ACL(self):
+    def check_loaded_ACL(self, acl):
         context = isc.acl.dns.RequestContext(socket.getaddrinfo("127.0.0.1",
                                              1234, 0, socket.SOCK_DGRAM,
                                              socket.IPPROTO_UDP,
                                              socket.AI_NUMERICHOST)[0][4])
-        self.assertEqual(isc.acl.acl.ACCEPT, self.unix._acl.execute(context))
+        self.assertEqual(isc.acl.acl.ACCEPT, acl.execute(context))
         context = isc.acl.dns.RequestContext(socket.getaddrinfo("192.0.2.1",
                                              1234, 0, socket.SOCK_DGRAM,
                                              socket.IPPROTO_UDP,
                                              socket.AI_NUMERICHOST)[0][4])
-        self.assertEqual(isc.acl.acl.REJECT, self.unix._acl.execute(context))
+        self.assertEqual(isc.acl.acl.REJECT, acl.execute(context))
 
     def test_update_config_data(self):
         self.check_default_ACL()
@@ -622,14 +1206,79 @@ class TestUnixSockServer(unittest.TestCase):
         self.assertEqual(self.unix.tsig_key_ring.size(), 0)
 
         # Load the ACL
-        self.unix.update_config_data({'query_acl': [{'from': '127.0.0.1',
+        self.unix.update_config_data({'transfer_acl': [{'from': '127.0.0.1',
                                                'action': 'ACCEPT'}]})
-        self.check_loaded_ACL()
+        self.check_loaded_ACL(self.unix._acl)
         # Pass a wrong data there and check it does not replace the old one
-        self.assertRaises(isc.acl.acl.LoaderError,
+        self.assertRaises(XfroutConfigError,
+                          self.unix.update_config_data,
+                          {'transfer_acl': ['Something bad']})
+        self.check_loaded_ACL(self.unix._acl)
+
+    def test_zone_config_data(self):
+        # By default, there's no specific zone config
+        self.assertEqual({}, self.unix._zone_config)
+
+        # Adding config for a specific zone.  The config is empty unless
+        # explicitly specified.
+        self.unix.update_config_data({'zone_config':
+                                          [{'origin': 'example.com',
+                                            'class': 'IN'}]})
+        self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+        # zone class can be omitted
+        self.unix.update_config_data({'zone_config':
+                                          [{'origin': 'example.com'}]})
+        self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+        # zone class, name are stored in the "normalized" form.  class
+        # strings are upper cased, names are down cased.
+        self.unix.update_config_data({'zone_config':
+                                          [{'origin': 'EXAMPLE.com'}]})
+        self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+
+        # invalid zone class, name will result in exceptions
+        self.assertRaises(EmptyLabel,
+                          self.unix.update_config_data,
+                          {'zone_config': [{'origin': 'bad..example'}]})
+        self.assertRaises(InvalidRRClass,
+                          self.unix.update_config_data,
+                          {'zone_config': [{'origin': 'example.com',
+                                            'class': 'badclass'}]})
+
+        # Configuring a couple of more zones
+        self.unix.update_config_data({'zone_config':
+                                          [{'origin': 'example.com'},
+                                           {'origin': 'example.com',
+                                            'class': 'CH'},
+                                           {'origin': 'example.org'}]})
+        self.assertEqual({}, self.unix._zone_config[('IN', 'example.com.')])
+        self.assertEqual({}, self.unix._zone_config[('CH', 'example.com.')])
+        self.assertEqual({}, self.unix._zone_config[('IN', 'example.org.')])
+
+        # Duplicate data: should be rejected with an exception
+        self.assertRaises(XfroutConfigError,
+                          self.unix.update_config_data,
+                          {'zone_config': [{'origin': 'example.com'},
+                                           {'origin': 'example.org'},
+                                           {'origin': 'example.com'}]})
+
+    def test_zone_config_data_with_acl(self):
+        # Similar to the previous test, but with transfer_acl config
+        self.unix.update_config_data({'zone_config':
+                                          [{'origin': 'example.com',
+                                            'transfer_acl':
+                                                [{'from': '127.0.0.1',
+                                                  'action': 'ACCEPT'}]}]})
+        acl = self.unix._zone_config[('IN', 'example.com.')]['transfer_acl']
+        self.check_loaded_ACL(acl)
+
+        # invalid ACL syntax will be rejected with exception
+        self.assertRaises(XfroutConfigError,
                           self.unix.update_config_data,
-                          {'query_acl': ['Something bad']})
-        self.check_loaded_ACL()
+                          {'zone_config': [{'origin': 'example.com',
+                                            'transfer_acl':
+                                                [{'action': 'BADACTION'}]}]})
 
     def test_get_db_file(self):
         self.assertEqual(self.unix.get_db_file(), "initdb.file")
@@ -738,7 +1387,7 @@ class TestInitialization(unittest.TestCase):
         self.setEnv("BIND10_XFROUT_SOCKET_FILE", None)
         xfrout.init_paths()
         self.assertEqual(xfrout.UNIX_SOCKET_FILE,
-                         "@@LOCALSTATEDIR@@/auth_xfrout_conn")
+                         "@@LOCALSTATEDIR@@/@PACKAGE_NAME@/auth_xfrout_conn")
 
     def testProvidedSocket(self):
         self.setEnv("B10_FROM_BUILD", None)
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 2e94369..310a0aa 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
 import threading
 import struct
 import signal
-from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient, ZoneFinder, ZoneJournalReader
 from socketserver import *
 import os
 from isc.config.ccsession import *
@@ -35,10 +35,11 @@ import errno
 from optparse import OptionParser, OptionValueError
 from isc.util import socketserver_mixin
 
-from xfrout_messages import *
+from isc.log_messages.xfrout_messages import *
 
 isc.log.init("b10-xfrout")
 logger = isc.log.Logger("xfrout")
+DBG_XFROUT_TRACE = logger.DBGLVL_TRACE_BASIC
 
 try:
     from libutil_io_python import *
@@ -46,13 +47,30 @@ try:
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrout process
     # must keep running, so we warn about it and move forward.
-    log.error(XFROUT_IMPORT, str(e))
+    logger.error(XFROUT_IMPORT, str(e))
 
-from isc.acl.acl import ACCEPT, REJECT, DROP
+from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
 from isc.acl.dns import REQUEST_LOADER
 
 isc.util.process.rename()
 
+class XfroutConfigError(Exception):
+    """An exception indicating an error in updating xfrout configuration.
+
+    This exception is raised when the xfrout process encounters an error in
+    handling configuration updates.  Not all syntax errors can be caught
+    at the module-CC layer, so xfrout needs to (explicitly or implicitly)
+    validate the given configuration data itself.  When it finds an error
+    it raises this exception (either directly or by converting an exception
+    from other modules) as a unified error in configuration.
+    """
+    pass
+
+class XfroutSessionError(Exception):
+    '''An exception raised for some unexpected events during an xfrout session.
+    '''
+    pass
+
 def init_paths():
     global SPECFILE_PATH
     global AUTH_SPECFILE_PATH
@@ -73,19 +91,47 @@ def init_paths():
         if "BIND10_XFROUT_SOCKET_FILE" in os.environ:
             UNIX_SOCKET_FILE = os.environ["BIND10_XFROUT_SOCKET_FILE"]
         else:
-            UNIX_SOCKET_FILE = "@@LOCALSTATEDIR@@/auth_xfrout_conn"
+            UNIX_SOCKET_FILE = "@@LOCALSTATEDIR@@/@PACKAGE_NAME@/auth_xfrout_conn"
 
 init_paths()
 
 SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
 AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
-MAX_TRANSFERS_OUT = 10
 VERBOSE_MODE = False
-# tsig sign every N axfr packets.
-TSIG_SIGN_EVERY_NTH = 96
-
-XFROUT_MAX_MESSAGE_SIZE = 65535
-
+XFROUT_DNS_HEADER_SIZE = 12     # protocol constant
+XFROUT_MAX_MESSAGE_SIZE = 65535 # ditto
+
+# borrowed from xfrin.py @ #1298.  We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+    """Helper function to format a zone name and class as a string of
+       the form '<name>/<class>'.
+       Parameters:
+       zone_name (isc.dns.Name) name to format
+       zone_class (isc.dns.RRClass) class to format
+    """
+    return zone_name.to_text(True) + '/' + str(zone_class)
+
+# borrowed from xfrin.py @ #1298.
+def format_addrinfo(addrinfo):
+    """Helper function to format the addrinfo as a string of the form
+       <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+       sockets, and unknown address families, it returns a basic string
+       conversion of the third element of the passed tuple.
+       Parameters:
+       addrinfo: a 3-tuple consisting of address family, socket type, and,
+                 depending on the family, either a 2-tuple with the address
+                 and port, or a filename
+    """
+    try:
+        if addrinfo[0] == socket.AF_INET:
+            return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+        elif addrinfo[0] == socket.AF_INET6:
+            return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+        else:
+            return str(addrinfo[2])
+    except IndexError:
+        raise TypeError("addrinfo argument to format_addrinfo() does not "
+                        "appear to be consisting of (family, socktype, (addr, port))")
 
 def get_rrset_len(rrset):
     """Returns the wire length of the given RRset"""
@@ -93,10 +139,15 @@ def get_rrset_len(rrset):
     rrset.to_wire(bytes)
     return len(bytes)
 
+def get_soa_serial(soa_rdata):
+    '''Extract the serial field of an SOA RDATA and return it as an integer.
+    (borrowed from xfrin)
+    '''
+    return int(soa_rdata.to_text().split()[2])
 
 class XfroutSession():
     def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
-                 acl):
+                 default_acl, zone_config, client_class=DataSourceClient):
         self._sock_fd = sock_fd
         self._request_data = request_data
         self._server = server
@@ -104,22 +155,53 @@ class XfroutSession():
         self._tsig_ctx = None
         self._tsig_len = 0
         self._remote = remote
-        self._acl = acl
-        self.handle()
+        self._request_type = None
+        self._request_typestr = None
+        self._acl = default_acl
+        self._zone_config = zone_config
+        self.ClientClass = client_class # parameterize this for testing
+        self._soa = None # will be set in _xfrout_setup or in tests
+        self._jnl_reader = None # will be set to a reader for IXFR
+        self._handle()
 
     def create_tsig_ctx(self, tsig_record, tsig_key_ring):
         return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
                            tsig_key_ring)
 
-    def handle(self):
-        ''' Handle a xfrout query, send xfrout response '''
+    def _handle(self):
+        ''' Handle an xfrout query, send xfrout response(s).
+
+        This is separated from the constructor so that we can override
+        it from tests.
+
+        '''
+        # Check the xfrout quota.  We do both increase/decrease in this
+        # method so it's clear we always release it once acquired.
+        quota_ok = self._server.increase_transfers_counter()
+        ex = None
         try:
-            self.dns_xfrout_start(self._sock_fd, self._request_data)
-            #TODO, avoid catching all exceptions
+            self.dns_xfrout_start(self._sock_fd, self._request_data, quota_ok)
         except Exception as e:
-            logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
-            pass
+            # To avoid resource leak we need catch all possible exceptions
+            # We log it later to exclude the case where even logger raises
+            # an exception.
+            ex = e
+
+        # Release any critical resources
+        if quota_ok:
+            self._server.decrease_transfers_counter()
+        self._close_socket()
 
+        if ex is not None:
+            logger.error(XFROUT_HANDLE_QUERY_ERROR, ex)
+
+    def _close_socket(self):
+        '''Simply close the socket via the given FD.
+
+        This is a dedicated subroutine of handle() and is separated from it
+        for the convenience of tests.
+
+        '''
         os.close(self._sock_fd)
 
     def _check_request_tsig(self, msg, request_data):
@@ -127,7 +209,8 @@ class XfroutSession():
         tsig_record = msg.get_tsig_record()
         if tsig_record is not None:
             self._tsig_len = tsig_record.get_length()
-            self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+            self._tsig_ctx = self.create_tsig_ctx(tsig_record,
+                                                  self._tsig_key_ring)
             tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
             if tsig_error != TSIGError.NOERROR:
                 return Rcode.NOTAUTH()
@@ -140,40 +223,71 @@ class XfroutSession():
         try:
             msg = Message(Message.PARSE)
             Message.from_wire(msg, mdata)
-
-            # TSIG related checks
-            rcode = self._check_request_tsig(msg, mdata)
-
-            if rcode == Rcode.NOERROR():
-                # ACL checks
-                acl_result = self._acl.execute(
-                    isc.acl.dns.RequestContext(self._remote))
-                if acl_result == DROP:
-                    logger.info(XFROUT_QUERY_DROPPED,
-                                self._get_query_zone_name(msg),
-                                self._get_query_zone_class(msg),
-                                self._remote[0], self._remote[1])
-                    return None, None
-                elif acl_result == REJECT:
-                    logger.info(XFROUT_QUERY_REJECTED,
-                                self._get_query_zone_name(msg),
-                                self._get_query_zone_class(msg),
-                                self._remote[0], self._remote[1])
-                    return Rcode.REFUSED(), msg
-
-        except Exception as err:
+        except Exception as err: # Exception is too broad
             logger.error(XFROUT_PARSE_QUERY_ERROR, err)
             return Rcode.FORMERR(), None
 
+        # TSIG related checks
+        rcode = self._check_request_tsig(msg, mdata)
+        if rcode != Rcode.NOERROR():
+            return rcode, msg
+
+        # Make sure the question is valid.  This should be ensured by
+        # the auth server, but since it's far from xfrout itself, we check
+        # it by ourselves.  A violation would be an internal bug, so we
+        # raise and stop here rather than returning a FORMERR or SERVFAIL.
+        if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
+            raise RuntimeError('Invalid number of question for XFR: ' +
+                               str(msg.get_rr_count(Message.SECTION_QUESTION)))
+        question = msg.get_question()[0]
+
+        # Identify the request type
+        self._request_type = question.get_type()
+        if self._request_type == RRType.AXFR():
+            self._request_typestr = 'AXFR'
+        elif self._request_type == RRType.IXFR():
+            self._request_typestr = 'IXFR'
+        else:
+            # Likewise, this should be impossible.
+            raise RuntimeError('Unexpected XFR type: ' +
+                               str(self._request_type))
+
+        # ACL checks
+        zone_name = question.get_name()
+        zone_class = question.get_class()
+        acl = self._get_transfer_acl(zone_name, zone_class)
+        acl_result = acl.execute(
+            isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
+        if acl_result == DROP:
+            logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_DROPPED,
+                         self._request_type, format_addrinfo(self._remote),
+                         format_zone_str(zone_name, zone_class))
+            return None, None
+        elif acl_result == REJECT:
+            logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
+                         self._request_type, format_addrinfo(self._remote),
+                         format_zone_str(zone_name, zone_class))
+            return Rcode.REFUSED(), msg
+
         return rcode, msg
 
-    def _get_query_zone_name(self, msg):
-        question = msg.get_question()[0]
-        return question.get_name().to_text()
+    def _get_transfer_acl(self, zone_name, zone_class):
+        '''Return the ACL that should be applied for a given zone.
 
-    def _get_query_zone_class(self, msg):
-        question = msg.get_question()[0]
-        return question.get_class().to_text()
+        The zone is identified by a tuple of name and RR class.
+        If a per zone configuration for the zone exists and contains
+        transfer_acl, that ACL will be used; otherwise, the default
+        ACL will be used.
+
+        '''
+        # Internally zone names are managed in lower cased label characters,
+        # so we first need to convert the name.
+        zone_name_lower = Name(zone_name.to_text(), True)
+        config_key = (zone_class.to_text(), zone_name_lower.to_text())
+        if config_key in self._zone_config and \
+                'transfer_acl' in self._zone_config[config_key]:
+            return self._zone_config[config_key]['transfer_acl']
+        return self._acl
 
     def _send_data(self, sock_fd, data):
         size = len(data)
@@ -210,51 +324,165 @@ class XfroutSession():
         msg.set_rcode(rcode_)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-    def _zone_has_soa(self, zone):
-        '''Judge if the zone has an SOA record.'''
-        # In some sense, the SOA defines a zone.
-        # If the current name server has authority for the
-        # specific zone, we need to judge if the zone has an SOA record;
-        # if not, we consider the zone has incomplete data, so xfrout can't
-        # serve for it.
-        if sqlite3_ds.get_zone_soa(zone, self._server.get_db_file()):
-            return True
+    def _get_zone_soa(self, zone_name):
+        '''Retrieve the SOA RR of the given zone.
+
+        It returns a pair of RCODE and the SOA (in the form of RRset).
+        On success RCODE is NOERROR and returned SOA is not None;
+        on failure RCODE indicates the appropriate code in the context of
+        xfr processing, and the returned SOA is None.
 
-        return False
-
-    def _zone_exist(self, zonename):
-        '''Judge if the zone is configured by config manager.'''
-        # Currently, if we find the zone in datasource successfully, we
-        # consider the zone is configured, and the current name server has
-        # authority for the specific zone.
-        # TODO: should get zone's configuration from cfgmgr or other place
-        # in future.
-        return sqlite3_ds.zone_exist(zonename, self._server.get_db_file())
-
-    def _check_xfrout_available(self, zone_name):
-        '''Check if xfr request can be responsed.
-           TODO, Get zone's configuration from cfgmgr or some other place
-           eg. check allow_transfer setting,
         '''
-        # If the current name server does not have authority for the
-        # zone, xfrout can't serve for it, return rcode NOTAUTH.
-        if not self._zone_exist(zone_name):
+        result, finder = self._datasrc_client.find_zone(zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return (Rcode.NOTAUTH(), None)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+                                        ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            return (Rcode.SERVFAIL(), None)
+        # Especially for database-based zones, a working zone may be in
+        # a broken state where it has more than one SOA RR.  We proactively
+        # check the condition and abort the xfr attempt if we identify it.
+        if soa_rrset.get_rdata_count() != 1:
+            return (Rcode.SERVFAIL(), None)
+        return (Rcode.NOERROR(), soa_rrset)
+
+    def __axfr_setup(self, zone_name):
+        '''Setup a zone iterator for AXFR or AXFR-style IXFR.
+
+        '''
+        try:
+            # Note that we enable 'separate_rrs'.  In xfr-out we need to
+            # preserve as many things as possible (even if it's half broken)
+            # stored in the zone.
+            self._iterator = self._datasrc_client.get_iterator(zone_name,
+                                                               True)
+        except isc.datasrc.Error:
+            # If the current name server does not have authority for the
+            # zone, xfrout can't serve for it, return rcode NOTAUTH.
+            # Note: this exception can happen for other reasons.  We should
+            # update get_iterator() API so that we can distinguish "no such
+            # zone" and other cases (#1373).  For now we consider all these
+            # cases as NOTAUTH.
             return Rcode.NOTAUTH()
 
         # If we are an authoritative name server for the zone, but fail
         # to find the zone's SOA record in datasource, xfrout can't
         # provide zone transfer for it.
-        if not self._zone_has_soa(zone_name):
+        self._soa = self._iterator.get_soa()
+        if self._soa is None or self._soa.get_rdata_count() != 1:
             return Rcode.SERVFAIL()
 
-        #TODO, check allow_transfer
-        if not self._server.increase_transfers_counter():
-            return Rcode.REFUSED()
+        return Rcode.NOERROR()
+
+    def __ixfr_setup(self, request_msg, zone_name, zone_class):
+        '''Setup a zone journal reader for IXFR.
+
+        If the underlying data source does not know the requested range
+        of zone differences it automatically falls back to AXFR-style
+        IXFR by setting up a zone iterator instead of a journal reader.
+
+        '''
+        # Check the authority section.  Look for a SOA record with
+        # the same name and class as the question.
+        remote_soa = None
+        for auth_rrset in request_msg.get_section(Message.SECTION_AUTHORITY):
+            # Ignore data whose owner name is not the zone apex, and
+            # ignore non-SOA or different class of records.
+            if auth_rrset.get_name() != zone_name or \
+                    auth_rrset.get_type() != RRType.SOA() or \
+                    auth_rrset.get_class() != zone_class:
+                continue
+            if auth_rrset.get_rdata_count() != 1:
+                logger.info(XFROUT_IXFR_MULTIPLE_SOA,
+                            format_addrinfo(self._remote))
+                return Rcode.FORMERR()
+            remote_soa = auth_rrset
+        if remote_soa is None:
+            logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
+            return Rcode.FORMERR()
+
+        # Retrieve the local SOA
+        rcode, self._soa = self._get_zone_soa(zone_name)
+        if rcode != Rcode.NOERROR():
+            return rcode
+
+        # RFC1995 says "If an IXFR query with the same or newer version
+        # number than that of the server is received, it is replied to with
+        # a single SOA record of the server's current version, just as
+        # in AXFR".  The claim about AXFR is incorrect, but other than that,
+        # we do as the RFC says.
+        # Note: until we complete #1278 we can only check equality of the
+        # two serials.  The "newer version" case would fall back to AXFR-style.
+        begin_serial = get_soa_serial(remote_soa.get_rdata()[0])
+        end_serial = get_soa_serial(self._soa.get_rdata()[0])
+        if begin_serial == end_serial:
+            # clear both iterator and jnl_reader to signal we won't do
+            # iteration in response generation
+            self._iterator = None
+            self._jnl_reader = None
+            logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return Rcode.NOERROR()
+
+        # Set up the journal reader or fall back to AXFR-style IXFR
+        try:
+            code, self._jnl_reader = self._datasrc_client.get_journal_reader(
+                zone_name, begin_serial, end_serial)
+        except isc.datasrc.NotImplemented as ex:
+            # The underlying data source doesn't support journaling.
+            # Fall back to AXFR-style IXFR.
+            logger.info(XFROUT_IXFR_NO_JOURNAL_SUPPORT,
+                        format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_VERSION:
+            logger.info(XFROUT_IXFR_NO_VERSION, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_ZONE:
+            # this is quite unexpected as we know zone's SOA exists.
+            # It might be a bug or the data source is somehow broken,
+            # but it can still happen if someone has removed the zone
+            # between these two operations.  We treat it as NOTAUTH.
+            logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return Rcode.NOTAUTH()
+
+        # Use the reader as the iterator to generate the response.
+        self._iterator = self._jnl_reader
 
         return Rcode.NOERROR()
 
+    def _xfrout_setup(self, request_msg, zone_name, zone_class):
+        '''Setup a context for xfr responses according to the request type.
+
+        This method identifies the most appropriate data source for the
+        request and set up a zone iterator or journal reader depending on
+        whether the request is AXFR or IXFR.  If it identifies any protocol
+        level error it returns an RCODE other than NOERROR.
+
+        '''
 
-    def dns_xfrout_start(self, sock_fd, msg_query):
+        # Identify the data source for the requested zone and see if it has
+        # SOA while initializing objects used for request processing later.
+        # We should eventually generalize this so that we can choose the
+        # appropriate data source from (possible) multiple candidates.
+        # We should eventually take into account the RR class here.
+        # For now, we hardcode a particular type (SQLite3-based), and only
+        # consider that one.
+        datasrc_config = '{ "database_file": "' + \
+            self._server.get_db_file() + '"}'
+        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+
+        if self._request_type == RRType.AXFR():
+            return self.__axfr_setup(zone_name)
+        else:
+            return self.__ixfr_setup(request_msg, zone_name, zone_class)
+
+    def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
         rcode_, msg = self._parse_query_message(msg_query)
         #TODO. create query message and parse header
         if rcode_ is None: # Dropped by ACL
@@ -264,29 +492,38 @@ class XfroutSession():
         elif rcode_ != Rcode.NOERROR():
             return self._reply_query_with_error_rcode(msg, sock_fd,
                                                       Rcode.FORMERR())
+        elif not quota_ok:
+            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
+                        format_addrinfo(self._remote),
+                        self._server._max_transfers_out)
+            return self._reply_query_with_error_rcode(msg, sock_fd,
+                                                      Rcode.REFUSED())
 
-        zone_name = self._get_query_zone_name(msg)
-        zone_class_str = self._get_query_zone_class(msg)
-        # TODO: should we not also include class in the check?
-        rcode_ = self._check_xfrout_available(zone_name)
+        question = msg.get_question()[0]
+        zone_name = question.get_name()
+        zone_class = question.get_class()
+        zone_str = format_zone_str(zone_name, zone_class) # for logging
 
+        try:
+            rcode_ = self._xfrout_setup(msg, zone_name, zone_class)
+        except Exception as ex:
+            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
+                         format_addrinfo(self._remote), zone_str, ex)
+            rcode_ = Rcode.SERVFAIL()
         if rcode_ != Rcode.NOERROR():
-            logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
-                        zone_class_str, rcode_.to_text())
+            logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
+                        format_addrinfo(self._remote), zone_str, rcode_)
             return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
-            self._reply_xfrout_query(msg, sock_fd, zone_name)
+            logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_typestr,
+                        format_addrinfo(self._remote), zone_str)
+            self._reply_xfrout_query(msg, sock_fd)
         except Exception as err:
-            logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
-                         zone_class_str, str(err))
-            pass
-        logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
-
-        self._server.decrease_transfers_counter()
-        return
-
+            logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_typestr,
+                    format_addrinfo(self._remote), zone_str, err)
+        logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_typestr,
+                    format_addrinfo(self._remote), zone_str)
 
     def _clear_message(self, msg):
         qid = msg.get_qid()
@@ -301,92 +538,100 @@ class XfroutSession():
         msg.set_header_flag(Message.HEADERFLAG_QR)
         return msg
 
-    def _create_rrset_from_db_record(self, record):
-        '''Create one rrset from one record of datasource, if the schema of record is changed,
-        This function should be updated first.
-        '''
-        rrtype_ = RRType(record[5])
-        rdata_ = Rdata(rrtype_, RRClass("IN"), " ".join(record[7:]))
-        rrset_ = RRset(Name(record[2]), RRClass("IN"), rrtype_, RRTTL( int(record[4])))
-        rrset_.add_rdata(rdata_)
-        return rrset_
-
-    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
-                                    count_since_last_tsig_sign):
-        '''Add the SOA record to the end of message. If it can't be
-        added, a new message should be created to send out the last soa .
-        '''
-        rrset_len = get_rrset_len(rrset_soa)
+    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
+                                    message_upper_len):
+        '''Add the SOA record to the end of message.
 
-        if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
-            message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
-            # If tsig context exist, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+        If it would exceed the maximum allowable size of a message, a new
+        message will be created to send out the last SOA.
+
+        We assume a message with a single SOA can always fit the buffer
+        with or without TSIG.  In theory this could be wrong if TSIG is
+        stupidly large, but in practice this assumption should be reasonable.
+        '''
+        if message_upper_len + get_rrset_len(rrset_soa) > \
+                XFROUT_MAX_MESSAGE_SIZE:
             self._send_message(sock_fd, msg, self._tsig_ctx)
             msg = self._clear_message(msg)
-        elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
-              message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
-            self._send_message(sock_fd, msg)
-            msg = self._clear_message(msg)
 
-        # If tsig context exist, sign the last packet
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-
-    def _reply_xfrout_query(self, msg, sock_fd, zone_name):
-        #TODO, there should be a better way to insert rrset.
-        count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
+    def _reply_xfrout_query(self, msg, sock_fd):
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
-        rrset_soa = self._create_rrset_from_db_record(soa_record)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        # Reserved space for the fixed header size, the size of the question
+        # section, and TSIG size (when included).  The size of the question
+        # section is the sum of the qname length and the size of the
+        # fixed-length fields (type and class, 2 bytes each).
+        message_upper_len = XFROUT_DNS_HEADER_SIZE + \
+            msg.get_question()[0].get_name().get_length() + 4 + \
+            self._tsig_len
+
+        # If the iterator is None, we are responding to IXFR with a single
+        # SOA RR.
+        if self._iterator is None:
+            self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                             message_upper_len)
+            return
 
-        message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
+        # Add the beginning SOA
+        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
+        message_upper_len += get_rrset_len(self._soa)
 
-        for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
-            if  self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
+        # Add the rest of the zone/diff contents
+        for rrset in self._iterator:
+            # Check if xfrout is shutdown
+            if  self._server._shutdown_event.is_set():
                 logger.info(XFROUT_STOPPING)
                 return
-            # TODO: RRType.SOA() ?
-            if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
-                continue
 
-            rrset_ = self._create_rrset_from_db_record(rr_data)
+            # For AXFR (or AXFR-style IXFR), in which case _jnl_reader is None,
+            # we should skip SOAs from the iterator.
+            if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
+                continue
 
             # We calculate the maximum size of the RRset (i.e. the
             # size without compression) and use that to see if we
             # may have reached the limit
-            rrset_len = get_rrset_len(rrset_)
-            if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
-                msg.add_rrset(Message.SECTION_ANSWER, rrset_)
+            rrset_len = get_rrset_len(rrset)
+
+            if message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE:
+                msg.add_rrset(Message.SECTION_ANSWER, rrset)
                 message_upper_len += rrset_len
                 continue
 
-            # If tsig context exist, sign every N packets
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                count_since_last_tsig_sign = 0
-                self._send_message(sock_fd, msg, self._tsig_ctx)
-            else:
-                self._send_message(sock_fd, msg)
+            # RR would not fit.  If there are other RRs in the buffer, send
+            # them now and leave this RR to the next message.
+            self._send_message(sock_fd, msg, self._tsig_ctx)
 
-            count_since_last_tsig_sign += 1
+            # Create a new message and reserve space for the carried-over
+            # RR (and TSIG space in case it's to be TSIG signed)
             msg = self._clear_message(msg)
-            msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
-
-            # Reserve tsig space for signed packet
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                message_upper_len = rrset_len + self._tsig_len
-            else:
-                message_upper_len = rrset_len
-
-        self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
-                                         count_since_last_tsig_sign)
-
-class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
+            message_upper_len = XFROUT_DNS_HEADER_SIZE + rrset_len + \
+                self._tsig_len
+
+            # If this RR overflows the buffer all by itself, fail.  In theory
+            # some RRs might fit in a TCP message when compressed even if they
+            # do not fit when uncompressed, but surely we don't want to send
+            # such monstrosities to an unsuspecting slave.
+            if message_upper_len > XFROUT_MAX_MESSAGE_SIZE:
+                raise XfroutSessionError('RR too large for zone transfer (' +
+                                         str(rrset_len) + ' bytes)')
+
+            # Add the RRset to the new message
+            msg.add_rrset(Message.SECTION_ANSWER, rrset)
+
+        # Add and send the trailing SOA
+        self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                         message_upper_len)
+
+class UnixSockServer(socketserver_mixin.NoPollMixIn,
+                     ThreadingUnixStreamServer):
     '''The unix domain socket server which accept xfr query sent from auth server.'''
 
-    def __init__(self, sock_file, handle_class, shutdown_event, config_data, cc):
+    def __init__(self, sock_file, handle_class, shutdown_event, config_data,
+                 cc):
         self._remove_unused_sock_file(sock_file)
         self._sock_file = sock_file
         socketserver_mixin.NoPollMixIn.__init__(self)
@@ -394,16 +639,15 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         self._shutdown_event = shutdown_event
         self._write_sock, self._read_sock = socket.socketpair()
         self._common_init()
-        self.update_config_data(config_data)
         self._cc = cc
+        self.update_config_data(config_data)
 
     def _common_init(self):
+        '''Initialization shared with the mock server class used for tests'''
         self._lock = threading.Lock()
         self._transfers_counter = 0
-        # This default value will probably get overwritten by the (same)
-        # default value from the spec file. This is here just to make
-        # sure and to make the default value in tests consistent.
-        self._acl = REQUEST_LOADER.load('[{"action": "ACCEPT"}]')
+        self._zone_config = {}
+        self._acl = None # this will be initialized in update_config_data()
 
     def _receive_query_message(self, sock):
         ''' receive request message from sock'''
@@ -454,7 +698,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             try:
                 self.process_request(request)
             except Exception as pre:
-                log.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
+                logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
                 break
 
     def _handle_request_noblock(self):
@@ -481,16 +725,19 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
         if not request_data:
             return
 
-        t = threading.Thread(target = self.finish_request,
+        t = threading.Thread(target=self.finish_request,
                              args = (sock_fd, request_data))
         if self.daemon_threads:
             t.daemon = True
         t.start()
 
     def _guess_remote(self, sock_fd):
-        """
-           Guess remote address and port of the socket. The sock_fd must be a
-           socket
+        """Guess remote address and port of the socket.
+
+        The sock_fd must be a file descriptor of a socket.
+        This method returns a 3-tuple consisting of address family,
+        socket type, and a 2-tuple with the address (string) and port (int).
+
         """
         # This uses a trick. If the socket is IPv4 in reality and we pretend
         # it to be IPv6, it returns IPv4 address anyway. This doesn't seem
@@ -502,13 +749,32 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             # To make it work even on hosts without IPv6 support
             # (Any idea how to simulate this in test?)
             sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
-        return sock.getpeername()
+        peer = sock.getpeername()
+
+        # Identify the correct socket family.  Due to the above "trick",
+        # we cannot simply use sock.family.
+        family = socket.AF_INET6
+        try:
+            socket.inet_pton(socket.AF_INET6, peer[0])
+        except socket.error:
+            family = socket.AF_INET
+        return (family, socket.SOCK_STREAM, peer)
 
     def finish_request(self, sock_fd, request_data):
-        '''Finish one request by instantiating RequestHandlerClass.'''
+        '''Finish one request by instantiating RequestHandlerClass.
+
+        This is an entry point of a separate thread spawned in
+        UnixSockServer.process_request().
+
+        This method creates a XfroutSession object.
+        '''
+        self._lock.acquire()
+        acl = self._acl
+        zone_config = self._zone_config
+        self._lock.release()
         self.RequestHandlerClass(sock_fd, request_data, self,
                                  self.tsig_key_ring,
-                                 self._guess_remote(sock_fd), self._acl)
+                                 self._guess_remote(sock_fd), acl, zone_config)
 
     def _remove_unused_sock_file(self, sock_file):
         '''Try to remove the socket file. If the file is being used
@@ -547,19 +813,67 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn, ThreadingUnixStreamServer):
             os.unlink(self._sock_file)
         except Exception as e:
             logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
-            pass
 
     def update_config_data(self, new_config):
-        '''Apply the new config setting of xfrout module. '''
-        logger.info(XFROUT_NEW_CONFIG)
-        if 'query_acl' in new_config:
-            self._acl = REQUEST_LOADER.load(new_config['query_acl'])
+        '''Apply the new config setting of xfrout module.
+
+        '''
         self._lock.acquire()
-        self._max_transfers_out = new_config.get('transfers_out')
-        self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+        try:
+            logger.info(XFROUT_NEW_CONFIG)
+            new_acl = self._acl
+            if 'transfer_acl' in new_config:
+                try:
+                    new_acl = REQUEST_LOADER.load(new_config['transfer_acl'])
+                except LoaderError as e:
+                    raise XfroutConfigError('Failed to parse transfer_acl: ' +
+                                            str(e))
+
+            new_zone_config = self._zone_config
+            zconfig_data = new_config.get('zone_config')
+            if zconfig_data is not None:
+                new_zone_config = self.__create_zone_config(zconfig_data)
+
+            self._acl = new_acl
+            self._zone_config = new_zone_config
+            self._max_transfers_out = new_config.get('transfers_out')
+            self.set_tsig_key_ring(new_config.get('tsig_key_ring'))
+        except Exception as e:
+            self._lock.release()
+            raise e
         self._lock.release()
         logger.info(XFROUT_NEW_CONFIG_DONE)
 
+    def __create_zone_config(self, zone_config_list):
+        new_config = {}
+        for zconf in zone_config_list:
+            # convert the class, origin (name) pair.  First build pydnspp
+            # object to reject invalid input.
+            zclass_str = zconf.get('class')
+            if zclass_str is None:
+                #zclass_str = 'IN' # temporary
+                zclass_str = self._cc.get_default_value('zone_config/class')
+            zclass = RRClass(zclass_str)
+            zorigin = Name(zconf['origin'], True)
+            config_key = (zclass.to_text(), zorigin.to_text())
+
+            # reject duplicate config
+            if config_key in new_config:
+                raise XfroutConfigError('Duplicate zone_config for ' +
+                                        str(zorigin) + '/' + str(zclass))
+
+            # create a new config entry, build any given (and known) config
+            new_config[config_key] = {}
+            if 'transfer_acl' in zconf:
+                try:
+                    new_config[config_key]['transfer_acl'] = \
+                        REQUEST_LOADER.load(zconf['transfer_acl'])
+                except LoaderError as e:
+                    raise XfroutConfigError('Failed to parse transfer_acl ' +
+                                            'for ' + zorigin.to_text() + '/' +
+                                            zclass_str + ': ' + str(e))
+        return new_config
+
     def set_tsig_key_ring(self, key_list):
         """Set the tsig_key_ring , given a TSIG key string list representation. """
 
@@ -616,8 +930,10 @@ class XfroutServer:
 
     def _start_xfr_query_listener(self):
         '''Start a new thread to accept xfr query. '''
-        self._unix_socket_server = UnixSockServer(self._listen_sock_file, XfroutSession,
-                                                  self._shutdown_event, self._config_data,
+        self._unix_socket_server = UnixSockServer(self._listen_sock_file,
+                                                  XfroutSession,
+                                                  self._shutdown_event,
+                                                  self._config_data,
                                                   self._cc)
         listener = threading.Thread(target=self._unix_socket_server.serve_forever)
         listener.start()
@@ -725,6 +1041,10 @@ if '__main__' == __name__:
         logger.INFO(XFROUT_STOPPED_BY_KEYBOARD)
     except SessionError as e:
         logger.error(XFROUT_CC_SESSION_ERROR, str(e))
+    except ModuleCCSessionError as e:
+        logger.error(XFROUT_MODULECC_SESSION_ERROR, str(e))
+    except XfroutConfigError as e:
+        logger.error(XFROUT_CONFIG_ERROR, str(e))
     except SessionTimeout as e:
         logger.error(XFROUT_CC_SESSION_TIMEOUT_ERROR)
 
diff --git a/src/bin/xfrout/xfrout.spec.pre.in b/src/bin/xfrout/xfrout.spec.pre.in
index 8ecbb0b..0891a57 100644
--- a/src/bin/xfrout/xfrout.spec.pre.in
+++ b/src/bin/xfrout/xfrout.spec.pre.in
@@ -51,7 +51,7 @@
          }
        },
        {
-         "item_name": "query_acl",
+         "item_name": "transfer_acl",
          "item_type": "list",
          "item_optional": false,
          "item_default": [{"action": "ACCEPT"}],
@@ -61,6 +61,45 @@
              "item_type": "any",
              "item_optional": true
          }
+       },
+       {
+         "item_name": "zone_config",
+         "item_type": "list",
+         "item_optional": true,
+         "item_default": [],
+         "list_item_spec":
+         {
+             "item_name": "zone_config_element",
+             "item_type": "map",
+             "item_optional": true,
+             "item_default": { "origin": "" },
+             "map_item_spec": [
+               {
+                   "item_name": "origin",
+                   "item_type": "string",
+                   "item_optional": false,
+                   "item_default": ""
+               },
+               {
+                   "item_name": "class",
+                   "item_type": "string",
+                   "item_optional": false,
+                   "item_default": "IN"
+               },
+               {
+                   "item_name": "transfer_acl",
+                   "item_type": "list",
+                   "item_optional": true,
+                   "item_default": [{"action": "ACCEPT"}],
+                   "list_item_spec":
+                   {
+                       "item_name": "acl_element",
+                       "item_type": "any",
+                       "item_optional": true
+                   }
+               }
+             ]
+         }
        }
       ],
       "commands": [
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 19b104e..fcc2e59 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -15,30 +15,6 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrout messages python module.
 
-% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-
-% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-
-% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-# Still a TODO, but when implemented, REFUSED can also mean
-# the client is not allowed to transfer the zone
-
-% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
-A transfer out of the given zone has started.
-
 % XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
 The TSIG key string as read from the configuration does not represent
 a valid TSIG key.
@@ -47,8 +23,19 @@ a valid TSIG key.
 There was a problem reading from the command and control channel. The
 most likely cause is that the msgq daemon is not running.
 
+% XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1
+There was a problem in the lower level module handling configuration and
+control commands.  This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization.  A detailed error message from the module
+will also be displayed.
+
+% XFROUT_CONFIG_ERROR error found in configuration data: %1
+The xfrout process encountered an error when installing the configuration at
+startup time.  Details of the error are included in the log message.
+
 % XFROUT_CC_SESSION_TIMEOUT_ERROR timeout waiting for cc response
-There was a problem reading a response from antoher module over the
+There was a problem reading a response from another module over the
 command and control channel. The most likely cause is that the
 configuration manager b10-cfgmgr is not running.
 
@@ -95,16 +82,27 @@ in the log message, but at this point no specific information other
 than that could be given. This points to incomplete exception handling
 in the code.
 
-% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+% XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped
+The xfrout process silently dropped a request to transfer zone to
+given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
 
-% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected
 The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+
+% XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict access to the legitimate
+clients with an ACL.
 
 % XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
 There was an error receiving the file descriptor for the transfer
@@ -149,3 +147,72 @@ on, but the file is in use. The most likely cause is that another
 xfrout daemon process is still running. This xfrout daemon (the one
 printing this message) will not start.
 
+% XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low-level error in the data
+source, but it may also be other general (less likely) errors such
+as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+
+% XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
+A transfer out of the given zone has started.
+
+% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs
+An IXFR request was received with more than one SOA RRs in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+
+% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+
+% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+
+% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for the serial number arithmetic.  This will soon
+be implemented.
+
+% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR
+An IXFR request was received, but the requested range of differences
+were not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+
+% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the small duration between these
+operations, but it's more likely to be a bug or broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
diff --git a/src/bin/zonemgr/Makefile.am b/src/bin/zonemgr/Makefile.am
index 8ab5f7a..aa427fd 100644
--- a/src/bin/zonemgr/Makefile.am
+++ b/src/bin/zonemgr/Makefile.am
@@ -7,10 +7,15 @@ pkglibexec_SCRIPTS = b10-zonemgr
 b10_zonemgrdir = $(pkgdatadir)
 b10_zonemgr_DATA = zonemgr.spec
 
-CLEANFILES = b10-zonemgr zonemgr.pyc zonemgr.spec
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES  = b10-zonemgr zonemgr.pyc zonemgr.spec
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.pyc
 
 man_MANS = b10-zonemgr.8
-EXTRA_DIST = $(man_MANS) b10-zonemgr.xml
+EXTRA_DIST = $(man_MANS) b10-zonemgr.xml zonemgr_messages.mes
 
 if ENABLE_MAN
 
@@ -19,10 +24,15 @@ b10-zonemgr.8: b10-zonemgr.xml
 
 endif
 
+# Build logging source file from message files
+$(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py : zonemgr_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/zonemgr_messages.mes
+
 zonemgr.spec: zonemgr.spec.pre
 	$(SED) -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.spec.pre >$@
 
-b10-zonemgr: zonemgr.py
+b10-zonemgr: zonemgr.py $(PYTHON_LOGMSGPKG_DIR)/work/zonemgr_messages.py
 	$(SED) -e "s|@@PYTHONPATH@@|@pyexecdir@|" \
 	       -e "s|@@LOCALSTATEDIR@@|$(localstatedir)|" zonemgr.py >$@
 	chmod a+x $@
diff --git a/src/bin/zonemgr/tests/Makefile.am b/src/bin/zonemgr/tests/Makefile.am
index 97f9b5e..769d332 100644
--- a/src/bin/zonemgr/tests/Makefile.am
+++ b/src/bin/zonemgr/tests/Makefile.am
@@ -7,7 +7,7 @@ CLEANFILES = initdb.file
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -20,6 +20,6 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/zonemgr:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/xfr/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/zonemgr/tests/zonemgr_test.py b/src/bin/zonemgr/tests/zonemgr_test.py
index 496ce6b..80e41b3 100644
--- a/src/bin/zonemgr/tests/zonemgr_test.py
+++ b/src/bin/zonemgr/tests/zonemgr_test.py
@@ -152,6 +152,16 @@ class TestZonemgrRefresh(unittest.TestCase):
         self.assertTrue((time1 + 3600 * (1 - self.zone_refresh._refresh_jitter)) <= zone_timeout)
         self.assertTrue(zone_timeout <= time2 + 3600)
 
+        # No soa rdata
+        self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_soa_rdata"] = None
+        time3 = time.time()
+        self.zone_refresh._set_zone_retry_timer(ZONE_NAME_CLASS1_IN)
+        zone_timeout = self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["next_refresh_time"]
+        time4 = time.time()
+        self.assertTrue((time3 + self.zone_refresh._lowerbound_retry * (1 - self.zone_refresh._refresh_jitter))
+                         <= zone_timeout)
+        self.assertTrue(zone_timeout <= time4 + self.zone_refresh._lowerbound_retry)
+
     def test_zone_not_exist(self):
         self.assertFalse(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_IN))
         self.assertTrue(self.zone_refresh._zone_not_exist(ZONE_NAME_CLASS1_CH))
@@ -304,8 +314,8 @@ class TestZonemgrRefresh(unittest.TestCase):
         def get_zone_soa2(zone_name, db_file):
             return None
         sqlite3_ds.get_zone_soa = get_zone_soa2
-        self.assertRaises(ZonemgrException, self.zone_refresh.zonemgr_add_zone, \
-                                         ZONE_NAME_CLASS1_IN)
+        self.zone_refresh.zonemgr_add_zone(ZONE_NAME_CLASS2_IN)
+        self.assertTrue(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS2_IN]["zone_soa_rdata"] is None)
         sqlite3_ds.get_zone_soa = old_get_zone_soa
 
     def test_zone_handle_notify(self):
@@ -362,6 +372,15 @@ class TestZonemgrRefresh(unittest.TestCase):
         self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_CH)
         self.assertRaises(ZonemgrException, self.zone_refresh.zone_refresh_fail, ZONE_NAME_CLASS3_IN)
 
+        old_get_zone_soa = sqlite3_ds.get_zone_soa
+        def get_zone_soa(zone_name, db_file):
+            return None
+        sqlite3_ds.get_zone_soa = get_zone_soa
+        self.zone_refresh.zone_refresh_fail(ZONE_NAME_CLASS1_IN)
+        self.assertEqual(self.zone_refresh._zonemgr_refresh_info[ZONE_NAME_CLASS1_IN]["zone_state"],
+                         ZONE_EXPIRED)
+        sqlite3_ds.get_zone_soa = old_get_zone_soa
+
     def test_find_need_do_refresh_zone(self):
         time1 = time.time()
         self.zone_refresh._zonemgr_refresh_info = {
@@ -440,6 +459,8 @@ class TestZonemgrRefresh(unittest.TestCase):
                                            "class": "IN" } ]
                 }
         self.zone_refresh.update_config_data(config_data)
+        self.assertTrue(("example.net.", "IN") in
+                        self.zone_refresh._zonemgr_refresh_info)
 
         # update all values
         config_data = {
@@ -479,14 +500,16 @@ class TestZonemgrRefresh(unittest.TestCase):
                     "secondary_zones": [ { "name": "doesnotexist",
                                            "class": "IN" } ]
                 }
-        self.assertRaises(ZonemgrException,
-                          self.zone_refresh.update_config_data,
-                          config_data)
-        self.assertEqual(60, self.zone_refresh._lowerbound_refresh)
-        self.assertEqual(30, self.zone_refresh._lowerbound_retry)
-        self.assertEqual(19800, self.zone_refresh._max_transfer_timeout)
-        self.assertEqual(0.25, self.zone_refresh._refresh_jitter)
-        self.assertEqual(0.35, self.zone_refresh._reload_jitter)
+        self.zone_refresh.update_config_data(config_data)
+        name_class = ("doesnotexist.", "IN")
+        self.assertTrue(self.zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+                        is None)
+        # The other configs should be updated successfully
+        self.assertEqual(61, self.zone_refresh._lowerbound_refresh)
+        self.assertEqual(31, self.zone_refresh._lowerbound_retry)
+        self.assertEqual(19801, self.zone_refresh._max_transfer_timeout)
+        self.assertEqual(0.21, self.zone_refresh._refresh_jitter)
+        self.assertEqual(0.71, self.zone_refresh._reload_jitter)
 
         # Make sure we accept 0 as a value
         config_data = {
@@ -526,10 +549,11 @@ class TestZonemgrRefresh(unittest.TestCase):
                         self.zone_refresh._zonemgr_refresh_info)
         # This one does not exist
         config.set_zone_list_from_name_classes(["example.net", "CH"])
-        self.assertRaises(ZonemgrException,
-                          self.zone_refresh.update_config_data, config)
-        # So it should not affect the old ones
-        self.assertTrue(("example.net.", "IN") in
+        self.zone_refresh.update_config_data(config)
+        self.assertFalse(("example.net.", "CH") in
+                        self.zone_refresh._zonemgr_refresh_info)
+        # Simply skip loading soa for the zone, the other configs should be updated successfully
+        self.assertFalse(("example.net.", "IN") in
                         self.zone_refresh._zonemgr_refresh_info)
         # Make sure it works even when we "accidentally" forget the final dot
         config.set_zone_list_from_name_classes([("example.net", "IN")])
@@ -596,15 +620,18 @@ class TestZonemgr(unittest.TestCase):
         config_data3 = {"refresh_jitter" : 0.7}
         self.zonemgr.config_handler(config_data3)
         self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
-        # The zone doesn't exist in database, it should be rejected
+        # The zone doesn't exist in the database, simply skip loading soa for it and log a warning
         self.zonemgr._zone_refresh = ZonemgrRefresh(None, "initdb.file", None,
                                                     config_data1)
         config_data1["secondary_zones"] = [{"name": "nonexistent.example",
                                             "class": "IN"}]
-        self.assertNotEqual(self.zonemgr.config_handler(config_data1),
-                            {"result": [0]})
-        # As it is rejected, the old value should be kept
-        self.assertEqual(0.5, self.zonemgr._config_data.get("refresh_jitter"))
+        self.assertEqual(self.zonemgr.config_handler(config_data1),
+                         {"result": [0]})
+        # other configs should be updated successfully
+        name_class = ("nonexistent.example.", "IN")
+        self.assertTrue(self.zonemgr._zone_refresh._zonemgr_refresh_info[name_class]["zone_soa_rdata"]
+                        is None)
+        self.assertEqual(0.1, self.zonemgr._config_data.get("refresh_jitter"))
 
     def test_get_db_file(self):
         self.assertEqual("initdb.file", self.zonemgr.get_db_file())
diff --git a/src/bin/zonemgr/zonemgr.py.in b/src/bin/zonemgr/zonemgr.py.in
index c6e3163..5bdb765 100755
--- a/src/bin/zonemgr/zonemgr.py.in
+++ b/src/bin/zonemgr/zonemgr.py.in
@@ -37,6 +37,16 @@ from isc.datasrc import sqlite3_ds
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 import isc.util.process
+from isc.log_messages.zonemgr_messages import *
+
+# Initialize logging for called modules.
+isc.log.init("b10-zonemgr")
+logger = isc.log.Logger("zonemgr")
+
+# Constants for debug levels.
+DBG_START_SHUT = logger.DBGLVL_START_SHUT
+DBG_ZONEMGR_COMMAND = logger.DBGLVL_COMMAND
+DBG_ZONEMGR_BASIC = logger.DBGLVL_TRACE_BASIC
 
 isc.util.process.rename()
 
@@ -77,13 +87,6 @@ REFRESH_OFFSET = 3
 RETRY_OFFSET = 4
 EXPIRED_OFFSET = 5
 
-# verbose mode
-VERBOSE_MODE = False
-
-def log_msg(msg):
-    if VERBOSE_MODE:
-        sys.stdout.write("[b10-zonemgr] %s\n" % str(msg))
-
 class ZonemgrException(Exception):
     pass
 
@@ -93,7 +96,6 @@ class ZonemgrRefresh:
     do zone refresh.
     Zone timers can be started by calling run_timer(), and it
     can be stopped by calling shutdown() in another thread.
-
     """
 
     def __init__(self, cc, db_file, slave_socket, config_data):
@@ -140,7 +142,10 @@ class ZonemgrRefresh:
         """Set zone next refresh time after zone refresh fail.
            now + retry - retry_jitter <= next_refresh_time <= now + retry
            """
-        zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+        if (self._get_zone_soa_rdata(zone_name_class) is not None):
+            zone_retry_time = float(self._get_zone_soa_rdata(zone_name_class).split(" ")[RETRY_OFFSET])
+        else:
+            zone_retry_time = 0.0
         zone_retry_time = max(self._lowerbound_retry, zone_retry_time)
         self._set_zone_timer(zone_name_class, zone_retry_time, self._refresh_jitter * zone_retry_time)
 
@@ -157,6 +162,7 @@ class ZonemgrRefresh:
     def zone_refresh_success(self, zone_name_class):
         """Update zone info after zone refresh success"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_SUCCESS, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
                                    "belong to zonemgr" % zone_name_class)
         self.zonemgr_reload_zone(zone_name_class)
@@ -167,10 +173,12 @@ class ZonemgrRefresh:
     def zone_refresh_fail(self, zone_name_class):
         """Update zone info after zone refresh fail"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_FAIL, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Zone (%s, %s) doesn't "
                                    "belong to zonemgr" % zone_name_class)
         # Is zone expired?
-        if (self._zone_is_expired(zone_name_class)):
+        if ((self._get_zone_soa_rdata(zone_name_class) is None) or
+            self._zone_is_expired(zone_name_class)):
             self._set_zone_state(zone_name_class, ZONE_EXPIRED)
         else:
             self._set_zone_state(zone_name_class, ZONE_OK)
@@ -179,6 +187,7 @@ class ZonemgrRefresh:
     def zone_handle_notify(self, zone_name_class, master):
         """Handle zone notify"""
         if (self._zone_not_exist(zone_name_class)):
+            logger.error(ZONEMGR_UNKNOWN_ZONE_NOTIFIED, zone_name_class[0], zone_name_class[1])
             raise ZonemgrException("[b10-zonemgr] Notified zone (%s, %s) "
                                    "doesn't belong to zonemgr" % zone_name_class)
         self._set_zone_notifier_master(zone_name_class, master)
@@ -191,19 +200,23 @@ class ZonemgrRefresh:
 
     def zonemgr_add_zone(self, zone_name_class):
         """ Add a zone into zone manager."""
-        log_msg("Loading zone (%s, %s)" % zone_name_class)
+
+        logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_LOAD_ZONE, zone_name_class[0], zone_name_class[1])
         zone_info = {}
         zone_soa = sqlite3_ds.get_zone_soa(str(zone_name_class[0]), self._db_file)
-        if not zone_soa:
-            raise ZonemgrException("[b10-zonemgr] zone (%s, %s) doesn't have soa." % zone_name_class)
-        zone_info["zone_soa_rdata"] = zone_soa[7]
+        if zone_soa is None:
+            logger.warn(ZONEMGR_NO_SOA, zone_name_class[0], zone_name_class[1])
+            zone_info["zone_soa_rdata"] = None
+            zone_reload_time = 0.0
+        else:
+            zone_info["zone_soa_rdata"] = zone_soa[7]
+            zone_reload_time = float(zone_soa[7].split(" ")[RETRY_OFFSET])
         zone_info["zone_state"] = ZONE_OK
         zone_info["last_refresh_time"] = self._get_current_time()
         self._zonemgr_refresh_info[zone_name_class] = zone_info
         # Imposes some random jitters to avoid many zones need to do refresh at the same time.
-        zone_reload_jitter = float(zone_soa[7].split(" ")[RETRY_OFFSET])
-        zone_reload_jitter = max(self._lowerbound_retry, zone_reload_jitter)
-        self._set_zone_timer(zone_name_class, zone_reload_jitter, self._reload_jitter * zone_reload_jitter)
+        zone_reload_time = max(self._lowerbound_retry, zone_reload_time)
+        self._set_zone_timer(zone_name_class, zone_reload_time, self._reload_jitter * zone_reload_time)
 
     def _zone_is_expired(self, zone_name_class):
         """Judge whether a zone is expired or not."""
@@ -265,7 +278,7 @@ class ZonemgrRefresh:
             except isc.cc.session.SessionTimeout:
                 pass        # for now we just ignore the failure
         except socket.error:
-            sys.stderr.write("[b10-zonemgr] Failed to send to module %s, the session has been closed." % module_name)
+            logger.error(ZONEMGR_SEND_FAIL, module_name)
 
     def _find_need_do_refresh_zone(self):
         """Find the first zone need do refresh, if no zone need
@@ -274,7 +287,8 @@ class ZonemgrRefresh:
         zone_need_refresh = None
         for zone_name_class in self._zonemgr_refresh_info.keys():
             zone_state = self._get_zone_state(zone_name_class)
-            # If hasn't received refresh response but are within refresh timeout, skip the zone
+            # If hasn't received refresh response but are within refresh
+            # timeout, skip the zone
             if (ZONE_REFRESHING == zone_state and
                 (self._get_zone_refresh_timeout(zone_name_class) > self._get_current_time())):
                 continue
@@ -294,7 +308,7 @@ class ZonemgrRefresh:
 
     def _do_refresh(self, zone_name_class):
         """Do zone refresh."""
-        log_msg("Do refresh for zone (%s, %s)." % zone_name_class)
+        logger.debug(DBG_ZONEMGR_BASIC, ZONEMGR_REFRESH_ZONE, zone_name_class[0], zone_name_class[1])
         self._set_zone_state(zone_name_class, ZONE_REFRESHING)
         self._set_zone_refresh_timeout(zone_name_class, self._get_current_time() + self._max_transfer_timeout)
         notify_master = self._get_zone_notifier_master(zone_name_class)
@@ -351,7 +365,7 @@ class ZonemgrRefresh:
                 if e.args[0] == errno.EINTR:
                     (rlist, wlist, xlist) = ([], [], [])
                 else:
-                    sys.stderr.write("[b10-zonemgr] Error with select(); %s\n" % e)
+                    logger.error(ZONEMGR_SELECT_ERROR, e);
                     break
 
             for fd in rlist:
@@ -365,12 +379,14 @@ class ZonemgrRefresh:
 
     def run_timer(self, daemon=False):
         """
-        Keep track of zone timers. Spawns and starts a thread. The thread object is returned.
+        Keep track of zone timers. Spawns and starts a thread. The thread object
+        is returned.
 
         You can stop it by calling shutdown().
         """
         # Small sanity check
         if self._running:
+            logger.error(ZONEMGR_TIMER_THREAD_RUNNING)
             raise RuntimeError("Trying to run the timers twice at the same time")
 
         # Prepare the launch
@@ -395,6 +411,7 @@ class ZonemgrRefresh:
         called from a different thread.
         """
         if not self._running:
+            logger.error(ZONEMGR_NO_TIMER_THREAD)
             raise RuntimeError("Trying to shutdown, but not running")
 
         # Ask the thread to stop
@@ -409,12 +426,6 @@ class ZonemgrRefresh:
 
     def update_config_data(self, new_config):
         """ update ZonemgrRefresh config """
-        # TODO: we probably want to store all this info in a nice
-        # class, so that we don't have to backup and restore every
-        # single value.
-        # TODO2: We also don't use get_default_value yet
-        backup = self._zonemgr_refresh_info.copy()
-
         # Get a new value, but only if it is defined (commonly used below)
         # We don't use "value or default", because if value would be
         # 0, we would take default
@@ -424,26 +435,21 @@ class ZonemgrRefresh:
             else:
                 return default
 
-        # store the values so we can restore them if there is a problem
-        lowerbound_refresh_backup = self._lowerbound_refresh
         self._lowerbound_refresh = val_or_default(
             new_config.get('lowerbound_refresh'), self._lowerbound_refresh)
 
-        lowerbound_retry_backup = self._lowerbound_retry
         self._lowerbound_retry = val_or_default(
             new_config.get('lowerbound_retry'), self._lowerbound_retry)
 
-        max_transfer_timeout_backup = self._max_transfer_timeout
         self._max_transfer_timeout = val_or_default(
             new_config.get('max_transfer_timeout'), self._max_transfer_timeout)
 
-        refresh_jitter_backup = self._refresh_jitter
         self._refresh_jitter = val_or_default(
             new_config.get('refresh_jitter'), self._refresh_jitter)
 
-        reload_jitter_backup = self._reload_jitter
         self._reload_jitter = val_or_default(
             new_config.get('reload_jitter'), self._reload_jitter)
+
         try:
             required = {}
             secondary_zones = new_config.get('secondary_zones')
@@ -458,6 +464,7 @@ class ZonemgrRefresh:
                     required[name_class] = True
                     # Add it only if it isn't there already
                     if not name_class in self._zonemgr_refresh_info:
+                        # If we are not able to find it in database, log a warning
                         self.zonemgr_add_zone(name_class)
                 # Drop the zones that are no longer there
                 # Do it in two phases, python doesn't like deleting while iterating
@@ -467,14 +474,7 @@ class ZonemgrRefresh:
                         to_drop.append(old_zone)
                 for drop in to_drop:
                     del self._zonemgr_refresh_info[drop]
-        # If we are not able to find it in database, restore the original
         except:
-            self._zonemgr_refresh_info = backup
-            self._lowerbound_refresh = lowerbound_refresh_backup
-            self._lowerbound_retry = lowerbound_retry_backup
-            self._max_transfer_timeout = max_transfer_timeout_backup
-            self._refresh_jitter = refresh_jitter_backup
-            self._reload_jitter = reload_jitter_backup
             raise
 
 class Zonemgr:
@@ -515,8 +515,8 @@ class Zonemgr:
         return db_file
 
     def shutdown(self):
-        """Shutdown the zonemgr process. the thread which is keeping track of zone
-        timers should be terminated.
+        """Shutdown the zonemgr process. The thread which is keeping track of
+           zone timers should be terminated.
         """
         self._zone_refresh.shutdown()
 
@@ -556,17 +556,17 @@ class Zonemgr:
         # jitter should not be bigger than half of the original value
         if config_data.get('refresh_jitter') > 0.5:
             config_data['refresh_jitter'] = 0.5
-            log_msg("[b10-zonemgr] refresh_jitter is too big, its value will "
-                      "be set to 0.5")
-
+            logger.warn(ZONEMGR_JITTER_TOO_BIG)
 
     def _parse_cmd_params(self, args, command):
         zone_name = args.get("zone_name")
         if not zone_name:
+            logger.error(ZONEMGR_NO_ZONE_NAME)
             raise ZonemgrException("zone name should be provided")
 
         zone_class = args.get("zone_class")
         if not zone_class:
+            logger.error(ZONEMGR_NO_ZONE_CLASS)
             raise ZonemgrException("zone class should be provided")
 
         if (command != ZONE_NOTIFY_COMMAND):
@@ -574,6 +574,7 @@ class Zonemgr:
 
         master_str = args.get("master")
         if not master_str:
+            logger.error(ZONEMGR_NO_MASTER_ADDRESS)
             raise ZonemgrException("master address should be provided")
 
         return ((zone_name, zone_class), master_str)
@@ -581,15 +582,16 @@ class Zonemgr:
 
     def command_handler(self, command, args):
         """Handle command receivd from command channel.
-        ZONE_NOTIFY_COMMAND is issued by Auth process; ZONE_XFRIN_SUCCESS_COMMAND
-        and ZONE_XFRIN_FAILED_COMMAND are issued by Xfrin process; shutdown is issued
-        by a user or Boss process. """
+        ZONE_NOTIFY_COMMAND is issued by Auth process;
+        ZONE_XFRIN_SUCCESS_COMMAND and ZONE_XFRIN_FAILED_COMMAND are issued by
+        Xfrin process;
+        shutdown is issued by a user or Boss process. """
         answer = create_answer(0)
         if command == ZONE_NOTIFY_COMMAND:
             """ Handle Auth notify command"""
             # master is the source sender of the notify message.
             zone_name_class, master = self._parse_cmd_params(args, command)
-            log_msg("Received notify command for zone (%s, %s)." % zone_name_class)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_NOTIFY, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_handle_notify(zone_name_class, master)
             # Send notification to zonemgr timer thread
@@ -598,6 +600,7 @@ class Zonemgr:
         elif command == ZONE_XFRIN_SUCCESS_COMMAND:
             """ Handle xfrin success command"""
             zone_name_class = self._parse_cmd_params(args, command)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_SUCCESS, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_refresh_success(zone_name_class)
             self._master_socket.send(b" ")# make self._slave_socket readble
@@ -605,14 +608,17 @@ class Zonemgr:
         elif command == ZONE_XFRIN_FAILED_COMMAND:
             """ Handle xfrin fail command"""
             zone_name_class = self._parse_cmd_params(args, command)
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_XFRIN_FAILED, zone_name_class[0], zone_name_class[1])
             with self._lock:
                 self._zone_refresh.zone_refresh_fail(zone_name_class)
             self._master_socket.send(b" ")# make self._slave_socket readble
 
         elif command == "shutdown":
+            logger.debug(DBG_ZONEMGR_COMMAND, ZONEMGR_RECEIVE_SHUTDOWN)
             self.shutdown()
 
         else:
+            logger.warn(ZONEMGR_RECEIVE_UNKNOWN, str(command))
             answer = create_answer(1, "Unknown command:" + str(command))
 
         return answer
@@ -639,25 +645,29 @@ def set_cmd_options(parser):
 
 if '__main__' == __name__:
     try:
+        logger.debug(DBG_START_SHUT, ZONEMGR_STARTING)
         parser = OptionParser()
         set_cmd_options(parser)
         (options, args) = parser.parse_args()
-        VERBOSE_MODE = options.verbose
+        if options.verbose:
+            logger.set_severity("DEBUG", 99)
 
         set_signal_handler()
         zonemgrd = Zonemgr()
         zonemgrd.run()
     except KeyboardInterrupt:
-        sys.stderr.write("[b10-zonemgr] exit zonemgr process\n")
+        logger.info(ZONEMGR_KEYBOARD_INTERRUPT)
+
     except isc.cc.session.SessionError as e:
-        sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
-                           "is the command channel daemon running?\n")
+        logger.error(ZONEMGR_SESSION_ERROR)
+
     except isc.cc.session.SessionTimeout as e:
-        sys.stderr.write("[b10-zonemgr] Error creating zonemgr, "
-                           "is the configuration manager running?\n")
+        logger.error(ZONEMGR_SESSION_TIMEOUT)
+
     except isc.config.ModuleCCSessionError as e:
-        sys.stderr.write("[b10-zonemgr] exit zonemgr process: %s\n" % str(e))
+        logger.error(ZONEMGR_CCSESSION_ERROR, str(e))
 
     if zonemgrd and zonemgrd.running:
         zonemgrd.shutdown()
 
+    logger.debug(DBG_START_SHUT, ZONEMGR_SHUTDOWN)
diff --git a/src/bin/zonemgr/zonemgr_messages.mes b/src/bin/zonemgr/zonemgr_messages.mes
new file mode 100644
index 0000000..8abec5d
--- /dev/null
+++ b/src/bin/zonemgr/zonemgr_messages.mes
@@ -0,0 +1,145 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the zonemgr messages python module.
+
+% ZONEMGR_CCSESSION_ERROR command channel session error: %1
+An error was encountered on the command channel.  The message indicates
+the nature of the error.
+
+% ZONEMGR_JITTER_TOO_BIG refresh_jitter is too big, setting to 0.5
+The value specified in the configuration for the refresh jitter is too large
+so its value has been set to the maximum of 0.5.
+
+% ZONEMGR_KEYBOARD_INTERRUPT exiting zonemgr process as result of keyboard interrupt
+An informational message output when the zone manager was being run at a
+terminal and it was terminated via a keyboard interrupt signal.
+
+% ZONEMGR_LOAD_ZONE loading zone %1 (class %2)
+This is a debug message indicating that the zone of the specified class
+is being loaded.
+
+% ZONEMGR_NO_MASTER_ADDRESS internal BIND 10 command did not contain address of master
+A command received by the zone manager from the Auth module did not
+contain the address of the master server from which a NOTIFY message
+was received.  This may be due to an internal programming error; please
+submit a bug report.
+
+% ZONEMGR_NO_SOA zone %1 (class %2) does not have an SOA record
+When loading the named zone of the specified class the zone manager
+discovered that the data did not contain an SOA record.  The load has
+been abandoned.
+
+% ZONEMGR_NO_TIMER_THREAD trying to stop zone timer thread but it is not running
+An attempt was made to stop the timer thread (used to track when zones
+should be refreshed) but it was not running.  This may indicate an
+internal program error.  Please submit a bug report.
+
+% ZONEMGR_NO_ZONE_CLASS internal BIND 10 command did not contain class of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the class of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_NO_ZONE_NAME internal BIND 10 command did not contain name of zone
+A command received by the zone manager from another BIND 10 module did
+not contain the name of the zone on which the zone manager should act.
+This may be due to an internal programming error; please submit a
+bug report.
+
+% ZONEMGR_RECEIVE_NOTIFY received NOTIFY command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received a
+NOTIFY command over the command channel.  The command is sent by the Auth
+process when it is acting as a slave server for the zone and causes the
+zone manager to record the master server for the zone and start a timer;
+when the timer expires, the master will be polled to see if it contains
+new data.
+
+% ZONEMGR_RECEIVE_SHUTDOWN received SHUTDOWN command
+This is a debug message indicating that the zone manager has received
+a SHUTDOWN command over the command channel from the Boss process.
+It will act on this command and shut down.
+
+% ZONEMGR_RECEIVE_UNKNOWN received unknown command '%1'
+This is a warning message indicating that the zone manager has received
+the stated command over the command channel.  The command is not known
+to the zone manager and although the command is ignored, its receipt
+may indicate an internal error.  Please submit a bug report.
+
+% ZONEMGR_RECEIVE_XFRIN_FAILED received XFRIN FAILED command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN FAILED command over the command channel.  The command is sent
+by the Xfrin process when a transfer of zone data into the system has
+failed, and causes the zone manager to schedule another transfer attempt.
+
+% ZONEMGR_RECEIVE_XFRIN_SUCCESS received XFRIN SUCCESS command for zone %1 (class %2)
+This is a debug message indicating that the zone manager has received
+an XFRIN SUCCESS command over the command channel.  The command is sent
+by the Xfrin process when the transfer of zone data into the system has
+succeeded, and causes the data to be loaded and served by BIND 10.
+
+% ZONEMGR_REFRESH_ZONE refreshing zone %1 (class %2)
+The zone manager is refreshing the named zone of the specified class
+with updated information.
+
+% ZONEMGR_SELECT_ERROR error with select(): %1
+An attempt to wait for input from a socket failed.  The failing operation
+is a call to the operating system's select() function, which failed for
+the given reason.
+
+% ZONEMGR_SEND_FAIL failed to send command to %1, session has been closed
+The zone manager attempted to send a command to the named BIND 10 module,
+but the send failed.  The session between the modules has been closed.
+
+% ZONEMGR_SESSION_ERROR unable to establish session to command channel daemon
+The zonemgr process was not able to be started because it could not
+connect to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SESSION_TIMEOUT timeout on session to command channel daemon
+The zonemgr process was not able to be started because it timed out when
+connecting to the command channel daemon.  The most usual cause of this
+problem is that the daemon is not running.
+
+% ZONEMGR_SHUTDOWN zone manager has shut down
+A debug message, output when the zone manager has shut down completely.
+
+% ZONEMGR_STARTING zone manager starting
+A debug message output when the zone manager starts up.
+
+% ZONEMGR_TIMER_THREAD_RUNNING trying to start timer thread but one is already running
+This message is issued when an attempt is made to start the timer
+thread (which keeps track of when zones need a refresh) but one is
+already running.  It indicates either an error in the program logic or
+a problem with stopping a previous instance of the timer.  Please submit
+a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_FAIL zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has failed but the zone that was the subject of the
+operation is not being managed by the zone manager.  This may indicate
+an error in the program (as the operation should not have been initiated
+if this were the case).  Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_NOTIFIED notified zone %1 (class %2) is not known to the zone manager
+A NOTIFY was received but the zone that was the subject of the operation
+is not being managed by the zone manager.  This may indicate an error
+in the program (as the operation should not have been initiated if this
+were the case).  Please submit a bug report.
+
+% ZONEMGR_UNKNOWN_ZONE_SUCCESS zone %1 (class %2) is not known to the zone manager
+An XFRIN operation has succeeded but the zone received is not being
+managed by the zone manager.  This may indicate an error in the program
+(as the operation should not have been initiated if this were the case).
+Please submit a bug report.
diff --git a/src/cppcheck-suppress.lst b/src/cppcheck-suppress.lst
index a4fea30..1020ffe 100644
--- a/src/cppcheck-suppress.lst
+++ b/src/cppcheck-suppress.lst
@@ -3,8 +3,9 @@
 debug
 missingInclude
 // This is a template, and should be excluded from the check
-unreadVariable:src/lib/dns/rdata/template.cc:60
+unreadVariable:src/lib/dns/rdata/template.cc:61
 // Intentional self assignment tests.  Suppress warning about them.
 selfAssignment:src/lib/dns/tests/name_unittest.cc:293
 selfAssignment:src/lib/dns/tests/rdata_unittest.cc:228
 selfAssignment:src/lib/dns/tests/tsigkey_unittest.cc:137
+selfAssignment:src/lib/dns/tests/rdata_txt_like_unittest.cc:222
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index c5d6b8d..9ebd541 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -1,3 +1,3 @@
-SUBDIRS = exceptions util log cryptolink dns cc config acl python xfr \
-          bench asiolink asiodns nsas cache resolve testutils datasrc \
-          server_common statistics
+SUBDIRS = exceptions util log cryptolink dns cc config acl xfr bench \
+          asiolink asiodns nsas cache resolve testutils datasrc \
+          server_common python dhcp statistics
diff --git a/src/lib/acl/Makefile.am b/src/lib/acl/Makefile.am
index f211025..92b7869 100644
--- a/src/lib/acl/Makefile.am
+++ b/src/lib/acl/Makefile.am
@@ -19,7 +19,7 @@ libacl_la_LIBADD += $(top_builddir)/src/lib/util/libutil.la
 # DNS specialized one
 lib_LTLIBRARIES += libdnsacl.la
 
-libdnsacl_la_SOURCES = dns.h dns.cc
+libdnsacl_la_SOURCES = dns.h dns.cc dnsname_check.h
 
 libdnsacl_la_LIBADD = libacl.la
 libdnsacl_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
diff --git a/src/lib/acl/dns.cc b/src/lib/acl/dns.cc
index cb948eb..b9cf91f 100644
--- a/src/lib/acl/dns.cc
+++ b/src/lib/acl/dns.cc
@@ -20,15 +20,20 @@
 
 #include <exceptions/exceptions.h>
 
+#include <dns/name.h>
+#include <dns/tsigrecord.h>
+
 #include <cc/data.h>
 
 #include <acl/dns.h>
 #include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
 #include <acl/loader.h>
 #include <acl/logic_check.h>
 
 using namespace std;
 using boost::shared_ptr;
+using namespace isc::dns;
 using namespace isc::data;
 
 namespace isc {
@@ -39,9 +44,6 @@ namespace acl {
 /// It returns \c true if the remote (source) IP address of the request
 /// matches the expression encapsulated in the \c IPCheck, and returns
 /// \c false if not.
-///
-/// \note The match logic is expected to be extended as we add
-/// more match parameters (at least there's a plan for TSIG key).
 template <>
 bool
 IPCheck<dns::RequestContext>::matches(
@@ -53,6 +55,18 @@ IPCheck<dns::RequestContext>::matches(
 
 namespace dns {
 
+/// The specialization of \c NameCheck for access control with
+/// \c RequestContext.
+///
+/// It returns \c true if the request contains a TSIG record and its key
+/// (owner) name is equal to the name stored in the check; otherwise
+/// it returns \c false.
+template<>
+bool
+NameCheck<RequestContext>::matches(const RequestContext& request) const {
+    return (request.tsig != NULL && request.tsig->getName() == name_);
+}
+
 vector<string>
 internal::RequestCheckCreator::names() const {
     // Probably we should eventually build this vector in a more
@@ -60,6 +74,7 @@ internal::RequestCheckCreator::names() const {
     // everything.
     vector<string> supported_names;
     supported_names.push_back("from");
+    supported_names.push_back("key");
     return (supported_names);
 }
 
@@ -77,6 +92,10 @@ internal::RequestCheckCreator::create(const string& name,
     if (name == "from") {
         return (shared_ptr<internal::RequestIPCheck>(
                     new internal::RequestIPCheck(definition->stringValue())));
+    } else if (name == "key") {
+        return (shared_ptr<internal::RequestKeyCheck>(
+                    new internal::RequestKeyCheck(
+                        Name(definition->stringValue()))));
     } else {
         // This case shouldn't happen (normally) as it should have been
         // rejected at the loader level.  But we explicitly catch the case
diff --git a/src/lib/acl/dns.h b/src/lib/acl/dns.h
index 118e5fd..d08fcf3 100644
--- a/src/lib/acl/dns.h
+++ b/src/lib/acl/dns.h
@@ -23,9 +23,13 @@
 #include <cc/data.h>
 
 #include <acl/ip_check.h>
+#include <acl/dnsname_check.h>
 #include <acl/loader.h>
 
 namespace isc {
+namespace dns {
+class TSIGRecord;
+}
 namespace acl {
 namespace dns {
 
@@ -53,9 +57,9 @@ namespace dns {
  * used only for a very short period as stated above.
  *
  * Based on the minimalist philosophy, the initial implementation only
- * maintains the remote (source) IP address of the request.  The plan is
- * to add more parameters of the request.  A scheduled next step is to
- * support the TSIG key (if it's included in the request).  Other possibilities
+ * maintains the remote (source) IP address of the request and (optionally)
+ * the TSIG record included in the request.  We may add more parameters of
+ * the request as we see the need for them.  Possible additional parameters
  * are the local (destination) IP address, the remote and local port numbers,
  * various fields of the DNS request (e.g. a particular header flag value).
  */
@@ -67,9 +71,13 @@ struct RequestContext {
     ///
     /// \exception None
     ///
-    /// \parameter remote_address_param The remote IP address
-    explicit RequestContext(const IPAddress& remote_address_param) :
-        remote_address(remote_address_param)
+    /// \param remote_address_param The remote IP address
+    /// \param tsig_param A valid pointer to the TSIG record included in
+    /// the request or NULL if the request doesn't contain a TSIG.
+    RequestContext(const IPAddress& remote_address_param,
+                   const isc::dns::TSIGRecord* tsig_param) :
+        remote_address(remote_address_param),
+        tsig(tsig_param)
     {}
 
     ///
@@ -83,6 +91,11 @@ struct RequestContext {
     //@{
     /// \brief The remote IP address (eg. the client's IP address).
     const IPAddress& remote_address;
+
+    /// \brief The TSIG record included in the request message, if any.
+    ///
+    /// If the request doesn't include a TSIG, this member will be NULL.
+    const isc::dns::TSIGRecord* const tsig;
     //@}
 };
 
@@ -114,6 +127,7 @@ namespace internal {
 
 // Shortcut typedef
 typedef isc::acl::IPCheck<RequestContext> RequestIPCheck;
+typedef isc::acl::dns::NameCheck<RequestContext> RequestKeyCheck;
 
 class RequestCheckCreator : public acl::Loader<RequestContext>::CheckCreator {
 public:
diff --git a/src/lib/acl/dnsname_check.h b/src/lib/acl/dnsname_check.h
new file mode 100644
index 0000000..7498d99
--- /dev/null
+++ b/src/lib/acl/dnsname_check.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DNSNAME_CHECK_H
+#define __DNSNAME_CHECK_H 1
+
+#include <dns/name.h>
+
+#include <acl/check.h>
+
+namespace isc {
+namespace acl {
+namespace dns {
+
+/// ACL check for DNS names
+///
+/// This class is intended to perform a match between a domain name
+/// specified in an ACL and a given name.  The primary usage of this class
+/// is an ACL match for TSIG keys, where an ACL would contain a list of
+/// acceptable key names and the \c match() method would compare the owner
+/// name of a TSIG record against the specified names.
+///
+/// This class could be used for other kinds of names such as the query name
+/// of normal DNS queries.
+///
+/// The class is templated on the type of a context structure passed to the
+/// matches() method, and a template specialisation for that method must be
+/// supplied for the class to be used.
+template <typename Context>
+class NameCheck : public Check<Context> {
+public:
+    /// The constructor
+    ///
+    /// \exception std::bad_alloc Resource allocation fails in copying the
+    /// name
+    ///
+    /// \param name The domain name to be matched in \c matches().
+    NameCheck(const isc::dns::Name& name) : name_(name) {}
+
+    /// Destructor
+    virtual ~NameCheck() {}
+
+    /// The check method
+    ///
+    /// Matches the passed argument to the condition stored here.  Different
+    /// specializations must be provided for different argument types, and the
+    /// program will fail to compile if a required specialisation is not
+    /// provided.
+    ///
+    /// \param context Information to be matched
+    virtual bool matches(const Context& context) const;
+
+    /// Returns the name specified on construction.
+    ///
+    /// This is mainly for testing purposes.
+    ///
+    /// \exception None
+    const isc::dns::Name& getName() const { return (name_); }
+
+private:
+    const isc::dns::Name name_;
+};
+
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+#endif // __DNSNAME_CHECK_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/acl/loader.h b/src/lib/acl/loader.h
index f60b144..fc69b44 100644
--- a/src/lib/acl/loader.h
+++ b/src/lib/acl/loader.h
@@ -125,7 +125,7 @@ BasicAction defaultActionLoader(data::ConstElementPtr action);
  *
  * The rest of the element are matches. The left side is the name of the
  * match type (for example match for source IP address or match for message
- * size). The <parameter> is whatever is needed to describe the match and
+ * size). The parameter is whatever is needed to describe the match and
  * depends on the match type, the loader passes it verbatim to creator
  * of that match type.
  *
@@ -148,7 +148,7 @@ public:
     /**
      * \brief Constructor.
      *
-     * \param default_action The default action for created ACLs.
+     * \param defaultAction The default action for created ACLs.
      * \param actionLoader is the loader which will be used to convert actions
      *     from their JSON representation. The default value is suitable for
      *     the BasicAction enum. If you did not specify the second
@@ -202,7 +202,7 @@ public:
          *     parameters might look like, they are not checked in any way.
          *     Therefore it's up to the creator (or the check being created)
          *     to validate the data and throw if it is bad.
-         * \param Current loader calling this creator. This can be used
+         * \param loader Current loader calling this creator. This can be used
          *     to load subexpressions in case of compound check.
          */
         virtual boost::shared_ptr<Check<Context> > create(
diff --git a/src/lib/acl/tests/Makefile.am b/src/lib/acl/tests/Makefile.am
index ce1aec5..6369511 100644
--- a/src/lib/acl/tests/Makefile.am
+++ b/src/lib/acl/tests/Makefile.am
@@ -16,6 +16,7 @@ run_unittests_SOURCES += acl_test.cc
 run_unittests_SOURCES += check_test.cc
 run_unittests_SOURCES += dns_test.cc
 run_unittests_SOURCES += ip_check_unittest.cc
+run_unittests_SOURCES += dnsname_check_unittest.cc
 run_unittests_SOURCES += loader_test.cc
 run_unittests_SOURCES += logcheck.h
 run_unittests_SOURCES += creators.h
@@ -30,6 +31,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
+run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libdnsacl.la
diff --git a/src/lib/acl/tests/dns_test.cc b/src/lib/acl/tests/dns_test.cc
index 3a42af0..b3ddbf4 100644
--- a/src/lib/acl/tests/dns_test.cc
+++ b/src/lib/acl/tests/dns_test.cc
@@ -23,6 +23,11 @@
 
 #include <exceptions/exceptions.h>
 
+#include <dns/name.h>
+#include <dns/tsigkey.h>
+#include <dns/tsigrecord.h>
+#include <dns/rdataclass.h>
+
 #include <cc/data.h>
 #include <acl/dns.h>
 #include <acl/loader.h>
@@ -35,6 +40,8 @@
 
 using namespace std;
 using boost::scoped_ptr;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
 using namespace isc::data;
 using namespace isc::acl;
 using namespace isc::acl::dns;
@@ -64,8 +71,10 @@ protected:
 };
 
 TEST_F(RequestCheckCreatorTest, names) {
-    ASSERT_EQ(1, creator_.names().size());
-    EXPECT_EQ("from", creator_.names()[0]);
+    const vector<string> names = creator_.names();
+    EXPECT_EQ(2, names.size());
+    EXPECT_TRUE(find(names.begin(), names.end(), "from") != names.end());
+    EXPECT_TRUE(find(names.begin(), names.end(), "key") != names.end());
 }
 
 TEST_F(RequestCheckCreatorTest, allowListAbbreviation) {
@@ -93,11 +102,11 @@ TEST_F(RequestCheckCreatorTest, createIPv6Check) {
     check_ = creator_.create("from",
                              Element::fromJSON("\"2001:db8::5300/120\""),
                              getRequestLoader());
-    const dns::internal::RequestIPCheck& ipcheck_ =
+    const dns::internal::RequestIPCheck& ipcheck =
         dynamic_cast<const dns::internal::RequestIPCheck&>(*check_);
-    EXPECT_EQ(AF_INET6, ipcheck_.getFamily());
-    EXPECT_EQ(120, ipcheck_.getPrefixlen());
-    const vector<uint8_t> check_address(ipcheck_.getAddress());
+    EXPECT_EQ(AF_INET6, ipcheck.getFamily());
+    EXPECT_EQ(120, ipcheck.getPrefixlen());
+    const vector<uint8_t> check_address(ipcheck.getAddress());
     ASSERT_EQ(16, check_address.size());
     const uint8_t expected_address[] = { 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00,
                                          0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -106,6 +115,14 @@ TEST_F(RequestCheckCreatorTest, createIPv6Check) {
                       expected_address));
 }
 
+TEST_F(RequestCheckCreatorTest, createTSIGKeyCheck) {
+    check_ = creator_.create("key", Element::fromJSON("\"key.example.com\""),
+                             getRequestLoader());
+    const dns::internal::RequestKeyCheck& keycheck =
+        dynamic_cast<const dns::internal::RequestKeyCheck&>(*check_);
+    EXPECT_EQ(Name("key.example.com"), keycheck.getName());
+}
+
 TEST_F(RequestCheckCreatorTest, badCreate) {
     // Invalid name
     EXPECT_THROW(creator_.create("bad", Element::fromJSON("\"192.0.2.1\""),
@@ -118,12 +135,23 @@ TEST_F(RequestCheckCreatorTest, badCreate) {
     EXPECT_THROW(creator_.create("from", Element::fromJSON("[]"),
                                  getRequestLoader()),
                  isc::data::TypeError);
+    EXPECT_THROW(creator_.create("key", Element::fromJSON("1"),
+                                 getRequestLoader()),
+                 isc::data::TypeError);
+    EXPECT_THROW(creator_.create("key", Element::fromJSON("{}"),
+                                 getRequestLoader()),
+                 isc::data::TypeError);
 
     // Syntax error for IPCheck
     EXPECT_THROW(creator_.create("from", Element::fromJSON("\"bad\""),
                                  getRequestLoader()),
                  isc::InvalidParameter);
 
+    // Syntax error for Name (key) Check
+    EXPECT_THROW(creator_.create("key", Element::fromJSON("\"bad..name\""),
+                                 getRequestLoader()),
+                 EmptyLabel);
+
     // NULL pointer
     EXPECT_THROW(creator_.create("from", ConstElementPtr(), getRequestLoader()),
                  LoaderError);
@@ -140,23 +168,43 @@ protected:
                                 getRequestLoader()));
     }
 
+    // A helper shortcut to create a single Name (key) check for the given
+    // name.
+    ConstRequestCheckPtr createKeyCheck(const string& key_name) {
+        return (creator_.create("key", Element::fromJSON(
+                                    string("\"") + key_name + string("\"")),
+                                getRequestLoader()));
+    }
+
     // create a one time request context for a specific test.  Note that
     // getSockaddr() uses a static storage, so it cannot be called more than
     // once in a single test.
-    const dns::RequestContext& getRequest4() {
+    const dns::RequestContext& getRequest4(const TSIGRecord* tsig = NULL) {
         ipaddr.reset(new IPAddress(tests::getSockAddr("192.0.2.1")));
-        request.reset(new dns::RequestContext(*ipaddr));
+        request.reset(new dns::RequestContext(*ipaddr, tsig));
         return (*request);
     }
-    const dns::RequestContext& getRequest6() {
+    const dns::RequestContext& getRequest6(const TSIGRecord* tsig = NULL) {
         ipaddr.reset(new IPAddress(tests::getSockAddr("2001:db8::1")));
-        request.reset(new dns::RequestContext(*ipaddr));
+        request.reset(new dns::RequestContext(*ipaddr, tsig));
         return (*request);
     }
 
+    // create a one time TSIG Record for a specific test.  The only parameter
+    // of the record that matters is the key name; others are hardcoded with
+    // arbitrarily chosen values.
+    const TSIGRecord* getTSIGRecord(const string& key_name) {
+        tsig_rdata.reset(new any::TSIG(TSIGKey::HMACMD5_NAME(), 0, 0, 0, NULL,
+                                       0, 0, 0, NULL));
+        tsig.reset(new TSIGRecord(Name(key_name), *tsig_rdata));
+        return (tsig.get());
+    }
+
 private:
     scoped_ptr<IPAddress> ipaddr;
     scoped_ptr<dns::RequestContext> request;
+    scoped_ptr<any::TSIG> tsig_rdata;
+    scoped_ptr<TSIGRecord> tsig;
     dns::internal::RequestCheckCreator creator_;
 };
 
@@ -184,6 +232,24 @@ TEST_F(RequestCheckTest, checkIPv6) {
     EXPECT_FALSE(createIPCheck("32.1.13.184")->matches(getRequest6()));
 }
 
+TEST_F(RequestCheckTest, checkTSIGKey) {
+    EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+                    getRequest4(getTSIGRecord("key.example.com"))));
+    EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+                     getRequest4(getTSIGRecord("badkey.example.com"))));
+
+    // Same for IPv6 (which shouldn't matter)
+    EXPECT_TRUE(createKeyCheck("key.example.com")->matches(
+                    getRequest6(getTSIGRecord("key.example.com"))));
+    EXPECT_FALSE(createKeyCheck("key.example.com")->matches(
+                     getRequest6(getTSIGRecord("badkey.example.com"))));
+
+    // by default the test request doesn't have a TSIG key, which shouldn't
+    // match any key checks.
+    EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest4()));
+    EXPECT_FALSE(createKeyCheck("key.example.com")->matches(getRequest6()));
+}
+
 // The following tests test only the creators are registered, they are tested
 // elsewhere
 
diff --git a/src/lib/acl/tests/dnsname_check_unittest.cc b/src/lib/acl/tests/dnsname_check_unittest.cc
new file mode 100644
index 0000000..95b5314
--- /dev/null
+++ b/src/lib/acl/tests/dnsname_check_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/name.h>
+
+#include <acl/dnsname_check.h>
+
+using namespace isc::dns;
+using namespace isc::acl::dns;
+
+// Provide a specialization of the DNSNameCheck::matches() method.
+namespace isc  {
+namespace acl {
+namespace dns {
+template <>
+bool NameCheck<Name>::matches(const Name& name) const {
+    return (name_ == name);
+}
+} // namespace dns
+} // namespace acl
+} // namespace isc
+
+namespace {
+TEST(DNSNameCheck, construct) {
+    EXPECT_EQ(Name("example.com"),
+              NameCheck<Name>(Name("example.com")).getName());
+
+    // Construct the same check with an explicit trailing dot.  Should result
+    // in the same result.
+    EXPECT_EQ(Name("example.com"),
+              NameCheck<Name>(Name("example.com.")).getName());
+}
+
+TEST(DNSNameCheck, match) {
+    NameCheck<Name> check(Name("example.com"));
+    EXPECT_TRUE(check.matches(Name("example.com")));
+    EXPECT_FALSE(check.matches(Name("example.org")));
+
+    // comparison is case insensitive
+    EXPECT_TRUE(check.matches(Name("EXAMPLE.COM")));
+
+    // this is exact match.  so super/sub domains don't match
+    EXPECT_FALSE(check.matches(Name("com")));
+    EXPECT_FALSE(check.matches(Name("www.example.com")));
+}
+} // Unnamed namespace
diff --git a/src/lib/asiodns/asiodns_messages.mes b/src/lib/asiodns/asiodns_messages.mes
index 3e11ede..feb75d4 100644
--- a/src/lib/asiodns/asiodns_messages.mes
+++ b/src/lib/asiodns/asiodns_messages.mes
@@ -26,13 +26,13 @@ enabled.
 % ASIODNS_OPEN_SOCKET error %1 opening %2 socket to %3(%4)
 The asynchronous I/O code encountered an error when trying to open a socket
 of the specified protocol in order to send a message to the target address.
-The number of the system error that cause the problem is given in the
+The number of the system error that caused the problem is given in the
 message.
 
 % ASIODNS_READ_DATA error %1 reading %2 data from %3(%4)
 The asynchronous I/O code encountered an error when trying to read data from
 the specified address on the given protocol.  The number of the system
-error that cause the problem is given in the message.
+error that caused the problem is given in the message.
 
 % ASIODNS_READ_TIMEOUT receive timeout while waiting for data from %1(%2)
 An upstream fetch from the specified address timed out.  This may happen for
@@ -41,9 +41,9 @@ or a problem on the network.  The message will only appear if debug is
 enabled.
 
 % ASIODNS_SEND_DATA error %1 sending data using %2 to %3(%4)
-The asynchronous I/O code encountered an error when trying send data to
-the specified address on the given protocol.  The the number of the system
-error that cause the problem is given in the message.
+The asynchronous I/O code encountered an error when trying to send data to
+the specified address on the given protocol.  The number of the system
+error that caused the problem is given in the message.
 
 % ASIODNS_UNKNOWN_ORIGIN unknown origin for ASIO error code %1 (protocol: %2, address %3)
 An internal consistency check on the origin of a message from the
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 31b5f50..466be3e 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -61,17 +61,13 @@ namespace asiodns {
 
 /// Use the ASIO logger
 
-namespace {
-
 isc::log::Logger logger("asiolink");
+
 // Log debug verbosity
-enum {
-    DBG_IMPORTANT = 1,
-    DBG_COMMON = 20,
-    DBG_ALL = 50
-};
 
-}
+const int DBG_IMPORTANT = DBGLVL_TRACE_BASIC;
+const int DBG_COMMON = DBGLVL_TRACE_DETAIL;
+const int DBG_ALL = DBGLVL_TRACE_DETAIL + 20;
 
 /// \brief IOFetch Data
 ///
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 22b3a8e..07c3e13 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -7,10 +7,16 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
 
 CLEANFILES = *.gcno *.gcda
 
-# This is a wrapper library solely used for b10-auth.  The ASIO header files
-# have some code fragments that would hit gcc's unused-parameter warning,
-# which would make the build fail with -Werror (our default setting).
+# This is a wrapper library.
+
+# The ASIO header files have some code fragments that would hit
+# gcc's unused-parameter warning, which would make the build fail
+# with -Werror (our default setting).
+
 lib_LTLIBRARIES = libasiolink.la
+
+libasiolink_la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libasiolink_la_SOURCES  = asiolink.h
 libasiolink_la_SOURCES += dummy_io_cb.h
 libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
diff --git a/src/lib/asiolink/dummy_io_cb.h b/src/lib/asiolink/dummy_io_cb.h
index 2081906..bcaefe9 100644
--- a/src/lib/asiolink/dummy_io_cb.h
+++ b/src/lib/asiolink/dummy_io_cb.h
@@ -39,7 +39,8 @@ public:
 
     /// \brief Asynchronous I/O callback method
     ///
-    /// \param error Unused
+    /// TODO: explain why this method should never be called.
+    /// This should be unused.
     void operator()(asio::error_code)
     {
         // TODO: log an error if this method ever gets called.
@@ -47,8 +48,8 @@ public:
 
     /// \brief Asynchronous I/O callback method
     ///
-    /// \param error Unused
-    /// \param length Unused
+    /// TODO: explain why this method should never be called.
+    /// This should be unused.
     void operator()(asio::error_code, size_t)
     {
         // TODO: log an error if this method ever gets called.
diff --git a/src/lib/asiolink/io_address.cc b/src/lib/asiolink/io_address.cc
index 7f7a6fc..0fe1db4 100644
--- a/src/lib/asiolink/io_address.cc
+++ b/src/lib/asiolink/io_address.cc
@@ -15,6 +15,7 @@
 #include <config.h>
 
 #include <unistd.h>             // for some IPC/network system calls
+#include <stdint.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
 
@@ -23,7 +24,7 @@
 #include <exceptions/exceptions.h>
 #include <asiolink/io_address.h>
 #include <asiolink/io_error.h>
-
+#include <boost/static_assert.hpp>
 
 using namespace asio;
 using asio::ip::udp;
@@ -49,11 +50,32 @@ IOAddress::IOAddress(const ip::address& asio_address) :
     asio_address_(asio_address)
 {}
 
+IOAddress::IOAddress(uint32_t v4address):
+    asio_address_(asio::ip::address_v4(v4address)) {
+
+}
+
 string
 IOAddress::toText() const {
     return (asio_address_.to_string());
 }
 
+IOAddress
+IOAddress::from_bytes(short family, const uint8_t* data) {
+    if (data == NULL) {
+        isc_throw(BadValue, "NULL pointer received.");
+    } else
+    if ( (family != AF_INET) && (family != AF_INET6) ) {
+        isc_throw(BadValue, "Invalid family type. Only AF_INET and AF_INET6"
+                  << "are supported");
+    }
+
+    BOOST_STATIC_ASSERT(INET6_ADDRSTRLEN >= INET_ADDRSTRLEN);
+    char addr_str[INET6_ADDRSTRLEN];
+    inet_ntop(family, data, addr_str, INET6_ADDRSTRLEN);
+    return IOAddress(string(addr_str));
+}
+
 short
 IOAddress::getFamily() const {
     if (asio_address_.is_v4()) {
@@ -63,5 +85,19 @@ IOAddress::getFamily() const {
     }
 }
 
+const asio::ip::address&
+IOAddress::getAddress() const {
+    return asio_address_;
+}
+
+IOAddress::operator uint32_t() const {
+    if (getAddress().is_v4()) {
+        return (getAddress().to_v4().to_ulong());
+    } else {
+        isc_throw(BadValue, "Can't convert " << toText()
+                  << " address to IPv4.");
+    }
+}
+
 } // namespace asiolink
 } // namespace isc
diff --git a/src/lib/asiolink/io_address.h b/src/lib/asiolink/io_address.h
index 655b727..c40e5b9 100644
--- a/src/lib/asiolink/io_address.h
+++ b/src/lib/asiolink/io_address.h
@@ -19,6 +19,7 @@
 // this file.  In particular, asio.hpp should never be included here.
 // See the description of the namespace below.
 #include <unistd.h>             // for some network system calls
+#include <stdint.h>             // for uint32_t
 #include <asio/ip/address.hpp>
 
 #include <functional>
@@ -29,6 +30,12 @@
 namespace isc {
 namespace asiolink {
 
+    /// Defines length of IPv6 address.
+    const static size_t V6ADDRESS_LEN = 16;
+
+    /// Defines length of IPv4 address.
+    const static size_t V4ADDRESS_LEN = 4;
+
 /// \brief The \c IOAddress class represents an IP addresses (version
 /// agnostic)
 ///
@@ -65,6 +72,15 @@ public:
     IOAddress(const asio::ip::address& asio_address);
     //@}
 
+    /// @brief Constructor for ip::address_v4 object.
+    ///
+    /// This constructor is intended to be used when constructing
+    /// IPv4 address out of uint32_t type. Passed value must be in
+    /// network byte order
+    ///
+    /// @param v4address IPv4 address represented by uint32_t
+    IOAddress(uint32_t v4address);
+
     /// \brief Convert the address to a string.
     ///
     /// This method is basically expected to be exception free, but
@@ -74,11 +90,29 @@ public:
     /// \return A string representation of the address.
     std::string toText() const;
 
+    /// \brief Returns const reference to the underlying address object.
+    ///
+    /// This is useful when access to the interface offered by
+    /// asio::ip::address_v4 and asio::ip::address_v6 is beneficial.
+    ///
+    /// \return A const reference to asio::ip::address object
+    const asio::ip::address& getAddress() const;
+
     /// \brief Returns the address family
     ///
     /// \return AF_INET for IPv4 or AF_INET6 for IPv6.
     short getFamily() const;
 
+
+    /// \brief Creates an address from over wire data.
+    ///
+    /// \param family AF_INET for IPv4 or AF_INET6 for IPv6.
+    /// \param data pointer to first char of data
+    ///
+    /// \return Created IOAddress object
+    static IOAddress
+    from_bytes(short family, const uint8_t* data);
+
     /// \brief Compare addresses for equality
     ///
     /// \param other Address to compare against.
@@ -115,6 +149,14 @@ public:
         return (nequals(other));
     }
 
+    /// \brief Converts IPv4 address to uint32_t
+    ///
+    /// Will throw a BadValue exception if the stored address
+    /// is not an IPv4 address.
+    ///
+    /// \return uint32_t that represents IPv4 address in
+    ///         network byte order
+    operator uint32_t () const;
 
 private:
     asio::ip::address asio_address_;
diff --git a/src/lib/asiolink/io_asio_socket.h b/src/lib/asiolink/io_asio_socket.h
index 864708c..aeac63d 100644
--- a/src/lib/asiolink/io_asio_socket.h
+++ b/src/lib/asiolink/io_asio_socket.h
@@ -82,8 +82,6 @@ class IOEndpoint;
 /// derived class for testing purposes rather than providing factory methods
 /// (i.e., getDummy variants below).
 ///
-/// TODO: Check if IOAsioSocket class is still needed
-///
 /// \param C Template parameter identifying type of the callback object.
 
 template <typename C>
@@ -328,10 +326,9 @@ public:
     ///
     /// A call that is a no-op on UDP sockets, this opens a connection to the
     /// system identified by the given endpoint.
+    /// The endpoint and callback are unused.
     ///
-    /// \param endpoint Unused
-    /// \param callback Unused.
-    ///false indicating that the operation completed synchronously.
+    /// \return false indicating that the operation completed synchronously.
     virtual bool open(const IOEndpoint*, C&) {
         return (false);
     }
@@ -339,23 +336,14 @@ public:
     /// \brief Send Asynchronously
     ///
     /// Must be supplied as it is abstract in the base class.
-    ///
-    /// \param data Unused
-    /// \param length Unused
-    /// \param endpoint Unused
-    /// \param callback Unused
+    /// This is unused.
     virtual void asyncSend(const void*, size_t, const IOEndpoint*, C&) {
     }
 
     /// \brief Receive Asynchronously
     ///
     /// Must be supplied as it is abstract in the base class.
-    ///
-    /// \param data Unused
-    /// \param length Unused
-    /// \param offset Unused
-    /// \param endpoint Unused
-    /// \param callback Unused
+    /// The parameters are unused.
     virtual void asyncReceive(void* data, size_t, size_t, IOEndpoint*, C&) {
     }
 
diff --git a/src/lib/asiolink/tests/io_address_unittest.cc b/src/lib/asiolink/tests/io_address_unittest.cc
index 18b181e..4322283 100644
--- a/src/lib/asiolink/tests/io_address_unittest.cc
+++ b/src/lib/asiolink/tests/io_address_unittest.cc
@@ -18,6 +18,8 @@
 #include <asiolink/io_error.h>
 #include <asiolink/io_address.h>
 
+#include <cstring>
+
 using namespace isc::asiolink;
 
 TEST(IOAddressTest, fromText) {
@@ -61,3 +63,39 @@ TEST(IOAddressTest, Family) {
     EXPECT_EQ(AF_INET, IOAddress("192.0.2.1").getFamily());
     EXPECT_EQ(AF_INET6, IOAddress("2001:0DB8:0:0::0012").getFamily());
 }
+
+TEST(IOAddressTest, from_bytes) {
+    // 2001:db8:1::dead:beef
+    uint8_t v6[] = {
+        0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+        0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef };
+
+    uint8_t v4[] = { 192, 0 , 2, 3 };
+
+    IOAddress addr("::");
+    EXPECT_NO_THROW({
+        addr = IOAddress::from_bytes(AF_INET6, v6);
+    });
+    EXPECT_EQ("2001:db8:1::dead:beef", addr.toText());
+
+    EXPECT_NO_THROW({
+        addr = IOAddress::from_bytes(AF_INET, v4);
+    });
+    EXPECT_EQ(addr.toText(), IOAddress("192.0.2.3").toText());
+}
+
+TEST(IOAddressTest, uint32) {
+    IOAddress addr1("192.0.2.5");
+
+    // operator uint_32() is used here
+    uint32_t tmp = addr1;
+
+    uint32_t expected = (192U << 24) +  (0U << 16) + (2U << 8) + 5U;
+
+    EXPECT_EQ(expected, tmp);
+
+    // now let's try opposite conversion
+    IOAddress addr3 = IOAddress(expected);
+
+    EXPECT_EQ(addr3.toText(), "192.0.2.5");
+}
diff --git a/src/lib/asiolink/tests/io_endpoint_unittest.cc b/src/lib/asiolink/tests/io_endpoint_unittest.cc
index f0279d1..c7283ec 100644
--- a/src/lib/asiolink/tests/io_endpoint_unittest.cc
+++ b/src/lib/asiolink/tests/io_endpoint_unittest.cc
@@ -219,7 +219,7 @@ sockAddrMatch(const struct sockaddr& actual_sa,
     res->ai_addr->sa_len = actual_sa.sa_len;
 #endif
     EXPECT_EQ(0, memcmp(res->ai_addr, &actual_sa, res->ai_addrlen));
-    free(res);
+    freeaddrinfo(res);
 }
 
 TEST(IOEndpointTest, getSockAddr) {
diff --git a/src/lib/bench/Makefile.am b/src/lib/bench/Makefile.am
index 866404f..514b3b3 100644
--- a/src/lib/bench/Makefile.am
+++ b/src/lib/bench/Makefile.am
@@ -6,6 +6,6 @@ AM_CXXFLAGS = $(B10_CXXFLAGS)
 
 CLEANFILES = *.gcno *.gcda
 
-lib_LTLIBRARIES = libbench.la
+noinst_LTLIBRARIES = libbench.la
 libbench_la_SOURCES = benchmark_util.h benchmark_util.cc
 EXTRA_DIST = benchmark.h
diff --git a/src/lib/bench/tests/Makefile.am b/src/lib/bench/tests/Makefile.am
index 3ebdf29..3f8a678 100644
--- a/src/lib/bench/tests/Makefile.am
+++ b/src/lib/bench/tests/Makefile.am
@@ -16,6 +16,7 @@ run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
 run_unittests_LDADD  = $(top_builddir)/src/lib/bench/libbench.la
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(GTEST_LDADD)
diff --git a/src/lib/cache/cache_messages.mes b/src/lib/cache/cache_messages.mes
index 2a68cc2..19102ae 100644
--- a/src/lib/cache/cache_messages.mes
+++ b/src/lib/cache/cache_messages.mes
@@ -124,14 +124,14 @@ the message will not be cached.
 Debug message. The requested data was found in the RRset cache. However, it is
 expired, so the cache removed it and is going to pretend nothing was found.
 
-% CACHE_RRSET_INIT initializing RRset cache for %2 RRsets of class %1
+% CACHE_RRSET_INIT initializing RRset cache for %1 RRsets of class %2
 Debug message. The RRset cache to hold at most this many RRsets for the given
 class is being created.
 
 % CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache
 Debug message. The resolver is trying to look up data in the RRset cache.
 
-% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3
+% CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache
 Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
 in the cache.
 
diff --git a/src/lib/cache/logger.h b/src/lib/cache/logger.h
index 8159ed4..52c9743 100644
--- a/src/lib/cache/logger.h
+++ b/src/lib/cache/logger.h
@@ -18,7 +18,7 @@
 #include <log/macros.h>
 #include <cache/cache_messages.h>
 
-/// \file logger.h
+/// \file cache/logger.h
 /// \brief Cache library global logger
 ///
 /// This holds the logger for the cache library. It is a private header
@@ -31,14 +31,13 @@ namespace cache {
 /// \brief The logger for this library
 extern isc::log::Logger logger;
 
-enum {
-    /// \brief Trace basic operations
-    DBG_TRACE_BASIC = 10,
-    /// \brief Trace data operations
-    DBG_TRACE_DATA = 40,
-};
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
 
-}
-}
+/// \brief Trace data operations
+const int DBG_TRACE_DATA = DBGLVL_TRACE_BASIC_DATA;
+
+} // namespace cache
+} // namespace isc
 
 #endif
diff --git a/src/lib/cache/message_cache.h b/src/lib/cache/message_cache.h
index 44d7fd1..b418f23 100644
--- a/src/lib/cache/message_cache.h
+++ b/src/lib/cache/message_cache.h
@@ -52,6 +52,8 @@ public:
     virtual ~MessageCache();
 
     /// \brief Look up message in cache.
+    /// \param qname Name of the domain for which the message is being sought.
+    /// \param qtype Type of the RR for which the message is being sought.
     /// \param message generated response message if the message entry
     ///        can be found.
     ///
diff --git a/src/lib/cache/resolver_cache.h b/src/lib/cache/resolver_cache.h
index 9ad4388..5630bd7 100644
--- a/src/lib/cache/resolver_cache.h
+++ b/src/lib/cache/resolver_cache.h
@@ -89,8 +89,8 @@ public:
     ResolverClassCache(const isc::dns::RRClass& cache_class);
 
     /// \brief Construct Function.
-    /// \param caches_size cache size information for each
-    ///        messages/rrsets of different classes.
+    /// \param cache_info Cache size information for each message/rrsets of
+    ///        different classes.
     ResolverClassCache(const CacheSizeInfo& cache_info);
 
     /// \name Lookup Interfaces
diff --git a/src/lib/cache/rrset_entry.h b/src/lib/cache/rrset_entry.h
index 5fa8f2c..09cf79c 100644
--- a/src/lib/cache/rrset_entry.h
+++ b/src/lib/cache/rrset_entry.h
@@ -27,9 +27,9 @@ using namespace isc::nsas;
 namespace isc {
 namespace cache {
 
-/// \enum RRset Trustworthiness
+/// \enum RRsetTrustLevel
 /// For detail of RRset trustworthiness, please refer to
-/// RFC2181 section5.4.1.
+/// RFC 2181 section 5.4.1.
 /// Bigger value is more trustworthy.
 enum RRsetTrustLevel {
     /// Default trust for RRset.
diff --git a/src/lib/cache/tests/Makefile.am b/src/lib/cache/tests/Makefile.am
index f9237af..a215c56 100644
--- a/src/lib/cache/tests/Makefile.am
+++ b/src/lib/cache/tests/Makefile.am
@@ -56,6 +56,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/cc/cc_messages.mes b/src/lib/cc/cc_messages.mes
index 8c62ea1..8370cdd 100644
--- a/src/lib/cc/cc_messages.mes
+++ b/src/lib/cc/cc_messages.mes
@@ -53,11 +53,11 @@ Debug message, we're about to send a message over the command channel.
 This happens when garbage comes over the command channel or some kind of
 confusion happens in the program. The data received from the socket make no
 sense if we interpret it as lengths of message. The first one is total length
-of message, the second length of the header. The header and it's length
-(2 bytes) is counted in the total length.
+of the message; the second is the length of the header. The header
+and its length (2 bytes) are counted in the total length.
 
 % CC_LENGTH_NOT_READY length not ready
-There should be data representing length of message on the socket, but it
+There should be data representing the length of the message on the socket, but it
 is not there.
 
 % CC_NO_MESSAGE no message ready to be received yet
diff --git a/src/lib/cc/data.cc b/src/lib/cc/data.cc
index a455d43..ffa5346 100644
--- a/src/lib/cc/data.cc
+++ b/src/lib/cc/data.cc
@@ -511,6 +511,8 @@ Element::nameToType(const std::string& type_name) {
         return (Element::list);
     } else if (type_name == "map") {
         return (Element::map);
+    } else if (type_name == "named_set") {
+        return (Element::map);
     } else if (type_name == "null") {
         return (Element::null);
     } else if (type_name == "any") {
diff --git a/src/lib/cc/logger.h b/src/lib/cc/logger.h
index 567ccee..d6253d0 100644
--- a/src/lib/cc/logger.h
+++ b/src/lib/cc/logger.h
@@ -18,7 +18,7 @@
 #include <cc/cc_messages.h>
 #include <log/macros.h>
 
-/// \file logger.h
+/// \file cc/logger.h
 /// \brief Command Channel library global logger
 ///
 /// This holds the logger for the CC library. It is a private header
@@ -28,20 +28,19 @@
 namespace isc {
 namespace cc {
 
-enum {
-    /// \brief Trace basic operation
-    DBG_TRACE_BASIC = 10,
-    /// \brief Trace even details
-    ///
-    /// This includes messages being sent and received, waiting for messages
-    /// and alike.
-    DBG_TRACE_DETAILED = 80
-};
+/// Trace basic operation
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
 
-/// \brief Logger for this library
+/// This includes messages being sent and received, waiting for messages
+/// and alike.
+const int DBG_TRACE_DETAILED = DBGLVL_TRACE_DETAIL;
+
+// Declaration of the logger.
 extern isc::log::Logger logger;
 
-}
-}
+} // namespace cc
+} // namespace isc
+
+/// \brief Logger for this library
 
 #endif
diff --git a/src/lib/cc/session.cc b/src/lib/cc/session.cc
index 97d5cf1..0052aca 100644
--- a/src/lib/cc/session.cc
+++ b/src/lib/cc/session.cc
@@ -119,7 +119,7 @@ private:
 void
 SessionImpl::establish(const char& socket_file) {
     try {
-        LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(socket_file);
+        LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISH).arg(&socket_file);
         socket_.connect(asio::local::stream_protocol::endpoint(&socket_file),
                         error_);
         LOG_DEBUG(logger, DBG_TRACE_BASIC, CC_ESTABLISHED);
@@ -254,7 +254,8 @@ SessionImpl::internalRead(const asio::error_code& error,
     }
 }
 
-Session::Session(io_service& io_service) : impl_(new SessionImpl(io_service))
+Session::Session(asio::io_service& io_service) :
+    impl_(new SessionImpl(io_service))
 {}
 
 Session::~Session() {
diff --git a/src/lib/config/ccsession.cc b/src/lib/config/ccsession.cc
index 6b094ec..ac85077 100644
--- a/src/lib/config/ccsession.cc
+++ b/src/lib/config/ccsession.cc
@@ -18,12 +18,15 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/time.h>
+#include <ctype.h>
 
-#include <iostream>
-#include <fstream>
-#include <sstream>
+#include <algorithm>
 #include <cerrno>
+#include <fstream>
+#include <iostream>
 #include <set>
+#include <sstream>
+#include <string>
 
 #include <boost/bind.hpp>
 #include <boost/foreach.hpp>
@@ -175,6 +178,36 @@ ConstElementPtr getValueOrDefault(ConstElementPtr config_part,
     }
 }
 
+// Prefix name with "b10-".
+//
+// In BIND 10, modules have names taken from the .spec file, which are typically
+// names starting with a capital letter (e.g. "Resolver", "Auth" etc.).  The
+// names of the associated binaries are derived from the module names, being
+// prefixed "b10-" and having the first letter of the module name lower-cased
+// (e.g. "b10-resolver", "b10-auth").  (It is a required convention that there
+// be this relationship between the names.)
+//
+// Within the binaries the root loggers are named after the binaries themselves.
+// (The reason for this is that the name of the logger is included in the
+// message logged, so making it clear which message comes from which BIND 10
+// process.) As logging is configured using module names, the configuration code
+// has to match these with the corresponding logger names. This function
+// converts a module name to a root logger name by lowercasing the first letter
+// of the module name and prepending "b10-".
+//
+// \param instring String to convert.  (This may be empty, in which case
+//        "b10-" will be returned.)
+//
+// \return Converted string.
+std::string
+b10Prefix(const std::string& instring) {
+    std::string result = instring;
+    if (!result.empty()) {
+        result[0] = tolower(result[0]);
+    }
+    return (std::string("b10-") + result);
+}
+
 // Reads a output_option subelement of a logger configuration,
 // and sets the values thereing to the given OutputOption struct,
 // or defaults values if they are not provided (from config_data).
@@ -215,6 +248,7 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
                 ConstElementPtr logger,
                 const ConfigData& config_data)
 {
+    // Read name, adding prefix as required.
     std::string lname = logger->get("name")->stringValue();
 
     ConstElementPtr severity_el = getValueOrDefault(logger,
@@ -247,6 +281,27 @@ readLoggersConf(std::vector<isc::log::LoggerSpecification>& specs,
     specs.push_back(logger_spec);
 }
 
+// Copies the map for a logger, changing the name of the logger in the process.
+// This is used because the map being copied is "const", so in order to
+// change the name we need to create a new one.
+//
+// \param cur_logger Logger being copied.
+// \param new_name New value of the "name" element at the top level.
+//
+// \return Pointer to the map with the updated element.
+ConstElementPtr
+copyLogger(ConstElementPtr& cur_logger, const std::string& new_name) {
+
+    // Since we'll only be updating one first-level element and subsequent
+    // use won't change the contents of the map, a shallow map copy is enough.
+    ElementPtr new_logger(Element::createMap());
+    new_logger->setValue(cur_logger->mapValue());
+    new_logger->set("name", Element::create(new_name));
+
+    return (new_logger);
+}
+
+
 } // end anonymous namespace
 
 
@@ -259,38 +314,60 @@ getRelatedLoggers(ConstElementPtr loggers) {
     ElementPtr result = isc::data::Element::createList();
 
     BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
+        // Need to add the b10- prefix to names read from the spec file.
         const std::string cur_name = cur_logger->get("name")->stringValue();
-        if (cur_name == root_name || cur_name.find(root_name + ".") == 0) {
-            our_names.insert(cur_name);
-            result->add(cur_logger);
+        const std::string mod_name = b10Prefix(cur_name);
+        if (mod_name == root_name || mod_name.find(root_name + ".") == 0) {
+
+            // Note this name so that we don't add a wildcard that matches it.
+            our_names.insert(mod_name);
+
+            // We want to store the logger with the modified name (i.e. with
+            // the b10- prefix).  As we are dealing with const loggers, we
+            // store a modified copy of the data.
+            result->add(copyLogger(cur_logger, mod_name));
+            LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS, CONFIG_LOG_EXPLICIT)
+                      .arg(cur_name);
+
+        } else if (!cur_name.empty() && (cur_name[0] != '*')) {
+            // Not a wildcard logger and we are ignoring it.
+            LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+                      CONFIG_LOG_IGNORE_EXPLICIT).arg(cur_name);
         }
     }
 
-    // now find the * names
+    // Now find the wildcard names (the ones that start with "*").
     BOOST_FOREACH(ConstElementPtr cur_logger, loggers->listValue()) {
         std::string cur_name = cur_logger->get("name")->stringValue();
-        // if name is '*', or starts with '*.', replace * with root
-        // logger name
+        // If name is '*', or starts with '*.', replace * with root
+        // logger name.
         if (cur_name == "*" || cur_name.length() > 1 &&
             cur_name[0] == '*' && cur_name[1] == '.') {
 
-            cur_name = root_name + cur_name.substr(1);
-            // now add it to the result list, but only if a logger with
-            // that name was not configured explicitely
-            if (our_names.find(cur_name) == our_names.end()) {
-                // we substitute the name here already, but as
-                // we are dealing with consts, we copy the data
-                ElementPtr new_logger(Element::createMap());
-                // since we'll only be updating one first-level element,
-                // and we return as const again, a shallow map copy is
-                // enough
-                new_logger->setValue(cur_logger->mapValue());
-                new_logger->set("name", Element::create(cur_name));
-                result->add(new_logger);
+            // Substitute the "*" with the root name
+            std::string mod_name = cur_name;
+            mod_name.replace(0, 1, root_name);
+
+            // Now add it to the result list, but only if a logger with
+            // that name was not configured explicitly.
+            if (our_names.find(mod_name) == our_names.end()) {
+
+                // We substitute the name here, but as we are dealing with
+                // consts, we need to copy the data.
+                result->add(copyLogger(cur_logger, mod_name));
+                LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+                          CONFIG_LOG_WILD_MATCH).arg(cur_name);
+
+            } else if (!cur_name.empty() && (cur_name[0] == '*')) {
+                // Is a wildcard and we are ignoring it (because the wildcard
+                // expands to a specification that we already encountered when
+                // processing explicit names).
+                LOG_DEBUG(config_logger, DBG_CONFIG_PROCESS,
+                          CONFIG_LOG_IGNORE_WILD).arg(cur_name);
             }
         }
     }
-    return result;
+    return (result);
 }
 
 void
@@ -318,7 +395,7 @@ ModuleSpec
 ModuleCCSession::readModuleSpecification(const std::string& filename) {
     std::ifstream file;
     ModuleSpec module_spec;
-    
+
     // this file should be declared in a @something@ directive
     file.open(filename.c_str());
     if (!file) {
@@ -385,7 +462,7 @@ ModuleCCSession::ModuleCCSession(
         LOG_ERROR(config_logger, CONFIG_MOD_SPEC_REJECT).arg(answer->str());
         isc_throw(CCSessionInitError, answer->str());
     }
-    
+
     setLocalConfig(Element::fromJSON("{}"));
     // get any stored configuration from the manager
     if (config_handler_) {
@@ -511,7 +588,7 @@ int
 ModuleCCSession::checkCommand() {
     ConstElementPtr cmd, routing, data;
     if (session_.group_recvmsg(routing, data, true)) {
-        
+
         /* ignore result messages (in case we're out of sync, to prevent
          * pingpongs) */
         if (data->getType() != Element::map || data->contains("result")) {
diff --git a/src/lib/config/ccsession.h b/src/lib/config/ccsession.h
index a39d996..50bb65c 100644
--- a/src/lib/config/ccsession.h
+++ b/src/lib/config/ccsession.h
@@ -377,10 +377,10 @@ default_logconfig_handler(const std::string& module_name,
 /// \brief Returns the loggers related to this module
 ///
 /// This function does two things;
-/// - it drops the configuration parts for loggers for other modules
+/// - it drops the configuration parts for loggers for other modules.
 /// - it replaces the '*' in the name of the loggers by the name of
 ///   this module, but *only* if the expanded name is not configured
-///   explicitely
+///   explicitly.
 ///
 /// Examples: if this is the module b10-resolver,
 /// For the config names ['*', 'b10-auth']
diff --git a/src/lib/config/config_log.h b/src/lib/config/config_log.h
index 0063855..21709fd 100644
--- a/src/lib/config/config_log.h
+++ b/src/lib/config/config_log.h
@@ -30,7 +30,10 @@ namespace config {
 /// Define the logger used to log messages.  We could define it in multiple
 /// modules, but defining in a single module and linking to it saves time and
 /// space.
-extern isc::log::Logger config_logger;    // isc::config::config_logger is the CONFIG logger
+extern isc::log::Logger config_logger;
+
+// Enumerate configuration elements as they are processed.
+const int DBG_CONFIG_PROCESS = DBGLVL_TRACE_BASIC;
 
 } // namespace config
 } // namespace isc
diff --git a/src/lib/config/config_messages.mes b/src/lib/config/config_messages.mes
index 660ab9a..c439edd 100644
--- a/src/lib/config/config_messages.mes
+++ b/src/lib/config/config_messages.mes
@@ -37,6 +37,31 @@ manager is appended to the log error. The most likely cause is that
 the module is of a different (command specification) version than the
 running configuration manager.
 
+% CONFIG_LOG_EXPLICIT will use logging configuration for explicitly-named logger %1
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the named
+logger that matches the logger specification for the program.  The logging
+configuration for the program will be updated with the information.
+
+% CONFIG_LOG_IGNORE_EXPLICIT ignoring logging configuration for explicitly-named logger %1
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found an entry for the
+named logger.  As this does not match the logger specification for the
+program, it has been ignored.
+
+% CONFIG_LOG_IGNORE_WILD ignoring logging configuration for wildcard logger %1
+This is a debug message.  When processing the "loggers" part of the
+configuration file, the configuration library found the named wildcard
+entry (one containing the "*" character) that matched a logger already
+matched by an explicitly named entry.  The configuration is ignored.
+
+% CONFIG_LOG_WILD_MATCH will use logging configuration for wildcard logger %1
+This is a debug message.  When processing the "loggers" part of
+the configuration file, the configuration library found the named
+wildcard entry (one containing the "*" character) that matches a logger
+specification in the program. The logging configuration for the program
+will be updated with the information.
+
 % CONFIG_JSON_PARSE JSON parse error in %1: %2
 There was an error parsing the JSON file. The given file does not appear
 to be in valid JSON format. Please verify that the filename is correct
diff --git a/src/lib/config/module_spec.cc b/src/lib/config/module_spec.cc
index 1621fe3..bebe695 100644
--- a/src/lib/config/module_spec.cc
+++ b/src/lib/config/module_spec.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010  Internet Systems Consortium.
+// Copyright (C) 2010, 2011  Internet Systems Consortium.
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -67,10 +67,13 @@ check_config_item(ConstElementPtr spec) {
         check_leaf_item(spec, "list_item_spec", Element::map, true);
         check_config_item(spec->get("list_item_spec"));
     }
-    // todo: add stuff for type map
-    if (Element::nameToType(spec->get("item_type")->stringValue()) == Element::map) {
+
+    if (spec->get("item_type")->stringValue() == "map") {
         check_leaf_item(spec, "map_item_spec", Element::list, true);
         check_config_item_list(spec->get("map_item_spec"));
+    } else if (spec->get("item_type")->stringValue() == "named_set") {
+        check_leaf_item(spec, "named_set_item_spec", Element::map, true);
+        check_config_item(spec->get("named_set_item_spec"));
     }
 }
 
@@ -84,6 +87,61 @@ check_config_item_list(ConstElementPtr spec) {
     }
 }
 
+// Checks whether the given value string conforms to the named time format
+// (e.g. "date-time", "date", "time"); returns false if it does not match
+// or if the format name is unknown.
+bool
+check_format(ConstElementPtr value, ConstElementPtr format_name) {
+    typedef std::map<std::string, std::string> format_types;
+    format_types time_formats;
+    // TODO: should be added other format types if necessary
+    time_formats.insert(
+        format_types::value_type("date-time", "%Y-%m-%dT%H:%M:%SZ") );
+    time_formats.insert(
+        format_types::value_type("date", "%Y-%m-%d") );
+    time_formats.insert(
+        format_types::value_type("time", "%H:%M:%S") );
+    BOOST_FOREACH (const format_types::value_type& f, time_formats) {
+        if (format_name->stringValue() == f.first) {
+            struct tm tm;
+            std::vector<char> buf(32);
+            memset(&tm, 0, sizeof(tm));
+            // reverse check
+            return (strptime(value->stringValue().c_str(),
+                             f.second.c_str(), &tm) != NULL
+                    && strftime(&buf[0], buf.size(),
+                                f.second.c_str(), &tm) != 0
+                    && strncmp(value->stringValue().c_str(),
+                               &buf[0], buf.size()) == 0);
+        }
+    }
+    return (false);
+}
+
+void check_statistics_item_list(ConstElementPtr spec);
+
+void
+check_statistics_item_list(ConstElementPtr spec) {
+    if (spec->getType() != Element::list) {
+        throw ModuleSpecError("statistics is not a list of elements");
+    }
+    BOOST_FOREACH(ConstElementPtr item, spec->listValue()) {
+        check_config_item(item);
+        // additional checks for statistics
+        check_leaf_item(item, "item_title", Element::string, true);
+        check_leaf_item(item, "item_description", Element::string, true);
+        check_leaf_item(item, "item_format", Element::string, false);
+        // checks name of item_format and validation of item_default
+        if (item->contains("item_format")
+            && item->contains("item_default")) {
+            if(!check_format(item->get("item_default"),
+                             item->get("item_format"))) {
+                throw ModuleSpecError(
+                    "item_default not valid type of item_format");
+            }
+        }
+    }
+}
+
 void
 check_command(ConstElementPtr spec) {
     check_leaf_item(spec, "command_name", Element::string, true);
@@ -113,6 +171,9 @@ check_data_specification(ConstElementPtr spec) {
     if (spec->contains("commands")) {
         check_command_list(spec->get("commands"));
     }
+    if (spec->contains("statistics")) {
+        check_statistics_item_list(spec->get("statistics"));
+    }
 }
 
 // checks whether the given element is a valid module specification
@@ -162,6 +223,15 @@ ModuleSpec::getConfigSpec() const {
     }
 }
 
+ConstElementPtr
+ModuleSpec::getStatisticsSpec() const {
+    if (module_specification->contains("statistics")) {
+        return (module_specification->get("statistics"));
+    } else {
+        return (ElementPtr());
+    }
+}
+
 const std::string
 ModuleSpec::getModuleName() const {
     return (module_specification->get("module_name")->stringValue());
@@ -183,6 +253,12 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full) const {
 }
 
 bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full) const {
+    ConstElementPtr spec = module_specification->find("statistics");
+    return (validateSpecList(spec, data, full, ElementPtr()));
+}
+
+bool
 ModuleSpec::validateCommand(const std::string& command,
                              ConstElementPtr args,
                              ElementPtr errors) const
@@ -220,6 +296,14 @@ ModuleSpec::validateConfig(ConstElementPtr data, const bool full,
     return (validateSpecList(spec, data, full, errors));
 }
 
+bool
+ModuleSpec::validateStatistics(ConstElementPtr data, const bool full,
+                               ElementPtr errors) const
+{
+    ConstElementPtr spec = module_specification->find("statistics");
+    return (validateSpecList(spec, data, full, errors));
+}
+
 ModuleSpec
 moduleSpecFromFile(const std::string& file_name, const bool check)
                    throw(JSONError, ModuleSpecError)
@@ -286,7 +370,8 @@ check_type(ConstElementPtr spec, ConstElementPtr element) {
             return (cur_item_type == "list");
             break;
         case Element::map:
-            return (cur_item_type == "map");
+            return (cur_item_type == "map" ||
+                    cur_item_type == "named_set");
             break;
     }
     return (false);
@@ -323,7 +408,27 @@ ModuleSpec::validateItem(ConstElementPtr spec, ConstElementPtr data,
         }
     }
     if (data->getType() == Element::map) {
-        if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+        // either a normal 'map' or a 'named set' (determined by which
+        // subspecification it has)
+        if (spec->contains("map_item_spec")) {
+            if (!validateSpecList(spec->get("map_item_spec"), data, full, errors)) {
+                return (false);
+            }
+        } else {
+            typedef std::pair<std::string, ConstElementPtr> maptype;
+
+            BOOST_FOREACH(maptype m, data->mapValue()) {
+                if (!validateItem(spec->get("named_set_item_spec"), m.second, full, errors)) {
+                    return (false);
+                }
+            }
+        }
+    }
+    if (spec->contains("item_format")) {
+        if (!check_format(data, spec->get("item_format"))) {
+            if (errors) {
+                errors->add(Element::create("Format mismatch"));
+            }
             return (false);
         }
     }
diff --git a/src/lib/config/module_spec.h b/src/lib/config/module_spec.h
index ab6e273..ce3762f 100644
--- a/src/lib/config/module_spec.h
+++ b/src/lib/config/module_spec.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010  Internet Systems Consortium.
+// Copyright (C) 2010, 2011  Internet Systems Consortium.
 //
 // Permission to use, copy, modify, and distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -71,6 +71,12 @@ namespace isc { namespace config {
         ///                    part of the specification
         isc::data::ConstElementPtr getConfigSpec() const;
 
+        /// Returns the statistics part of the specification as an
+        /// ElementPtr
+        /// \return ElementPtr Shared pointer to the statistics
+        ///                    part of the specification
+        isc::data::ConstElementPtr getStatisticsSpec() const;
+
         /// Returns the full module specification as an ElementPtr
         /// \return ElementPtr Shared pointer to the specification
         isc::data::ConstElementPtr getFullSpec() const {
@@ -95,6 +101,17 @@ namespace isc { namespace config {
         bool validateConfig(isc::data::ConstElementPtr data,
                              const bool full = false) const;
 
+        // returns true if the given element conforms to this data
+        // statistics specification
+        /// Validates the given statistics data for this specification.
+        /// \param data The base \c Element of the data to check
+        /// \param full If true, all non-optional statistics parameters
+        /// must be specified.
+        /// \return true if the data conforms to the specification,
+        /// false otherwise.
+        bool validateStatistics(isc::data::ConstElementPtr data,
+                             const bool full = false) const;
+
         /// Validates the arguments for the given command
         ///
         /// This checks the command and argument against the
@@ -142,6 +159,10 @@ namespace isc { namespace config {
         bool validateConfig(isc::data::ConstElementPtr data, const bool full,
                              isc::data::ElementPtr errors) const;
 
+        /// errors must be of type ListElement
+        bool validateStatistics(isc::data::ConstElementPtr data, const bool full,
+                                isc::data::ElementPtr errors) const;
+
     private:
         bool validateItem(isc::data::ConstElementPtr spec,
                           isc::data::ConstElementPtr data,
diff --git a/src/lib/config/tests/Makefile.am b/src/lib/config/tests/Makefile.am
index 7153e09..2f1fc6f 100644
--- a/src/lib/config/tests/Makefile.am
+++ b/src/lib/config/tests/Makefile.am
@@ -11,7 +11,7 @@ endif
 
 CLEANFILES = *.gcno *.gcda
 
-lib_LTLIBRARIES = libfake_session.la
+noinst_LTLIBRARIES = libfake_session.la
 libfake_session_la_SOURCES = fake_session.h fake_session.cc
 
 TESTS =
diff --git a/src/lib/config/tests/ccsession_unittests.cc b/src/lib/config/tests/ccsession_unittests.cc
index 283fcc4..793fa30 100644
--- a/src/lib/config/tests/ccsession_unittests.cc
+++ b/src/lib/config/tests/ccsession_unittests.cc
@@ -44,7 +44,9 @@ el(const std::string& str) {
 
 class CCSessionTest : public ::testing::Test {
 protected:
-    CCSessionTest() : session(el("[]"), el("[]"), el("[]")) {
+    CCSessionTest() : session(el("[]"), el("[]"), el("[]")),
+                      root_name(isc::log::getRootLoggerName())
+    {
         // upon creation of a ModuleCCSession, the class
         // sends its specification to the config manager.
         // it expects an ok answer back, so everytime we
@@ -52,8 +54,11 @@ protected:
         // ok answer.
         session.getMessages()->add(createAnswer());
     }
-    ~CCSessionTest() {}
+    ~CCSessionTest() {
+        isc::log::setRootLoggerName(root_name);
+    }
     FakeSession session;
+    const std::string root_name;
 };
 
 TEST_F(CCSessionTest, createAnswer) {
@@ -179,7 +184,7 @@ TEST_F(CCSessionTest, session2) {
     ConstElementPtr msg;
     std::string group, to;
     msg = session.getFirstMessage(group, to);
-    EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [  ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
 ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": {  }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+    EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [  ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
 ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": {  }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
     EXPECT_EQ("ConfigManager", group);
     EXPECT_EQ("*", to);
     EXPECT_EQ(0, session.getMsgQueue()->size());
@@ -226,7 +231,7 @@ TEST_F(CCSessionTest, session3) {
     ConstElementPtr msg;
     std::string group, to;
     msg = session.getFirstMessage(group, to);
-    EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [  ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
 ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": {  }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\" } ] }", msg->str());
+    EXPECT_EQ("{ \"command\": [ \"module_spec\", { \"commands\": [ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [  ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ], \"config_data\": [ { \"item_default\": 1, \"item_name\": \"item1\", \"item_optional\": false, \"item_type\": \"integer\" }, { \"item_default\": 1.1, \"item_name\": \"item2\", \"item_optional\": false, \"item_type\": \"real\" }, { \"item_default\": true, \"item_name\": \"item3\", \"item_optional\": false, \"item_type\": \"boolean\" }, { \"item_default\": \"test\", \"item_name\": \"item4\", \"item_optional\": false, \"item_type\": \"string\" }, { \"item_default\": [ \"a\", \"b\" ], \"item_name\": \"item5\", \"item_optional\": false, \"item_type\": \"list\", \"list_item_sp
 ec\": { \"item_default\": \"\", \"item_name\": \"list_element\", \"item_optional\": false, \"item_type\": \"string\" } }, { \"item_default\": {  }, \"item_name\": \"item6\", \"item_optional\": false, \"item_type\": \"map\", \"map_item_spec\": [ { \"item_default\": \"default\", \"item_name\": \"value1\", \"item_optional\": true, \"item_type\": \"string\" }, { \"item_name\": \"value2\", \"item_optional\": true, \"item_type\": \"integer\" } ] } ], \"module_name\": \"Spec2\", \"statistics\": [ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ] } ] }", msg->str());
     EXPECT_EQ("ConfigManager", group);
     EXPECT_EQ("*", to);
     EXPECT_EQ(1, session.getMsgQueue()->size());
@@ -652,41 +657,44 @@ void doRelatedLoggersTest(const char* input, const char* expected) {
 TEST(LogConfigTest, relatedLoggersTest) {
     // make sure logger configs for 'other' programs are ignored,
     // and that * is substituted correctly
-    // The default root logger name is "bind10"
+    // We'll use a root logger name of "b10-test".
+    isc::log::setRootLoggerName("b10-test");
+
     doRelatedLoggersTest("[{ \"name\": \"other_module\" }]",
                          "[]");
     doRelatedLoggersTest("[{ \"name\": \"other_module.somelib\" }]",
                          "[]");
-    doRelatedLoggersTest("[{ \"name\": \"bind10_other\" }]",
+    doRelatedLoggersTest("[{ \"name\": \"test_other\" }]",
                          "[]");
-    doRelatedLoggersTest("[{ \"name\": \"bind10_other.somelib\" }]",
+    doRelatedLoggersTest("[{ \"name\": \"test_other.somelib\" }]",
                          "[]");
     doRelatedLoggersTest("[ { \"name\": \"other_module\" },"
-                         "  { \"name\": \"bind10\" }]",
-                         "[ { \"name\": \"bind10\" } ]");
-    doRelatedLoggersTest("[ { \"name\": \"bind10\" }]",
-                         "[ { \"name\": \"bind10\" } ]");
-    doRelatedLoggersTest("[ { \"name\": \"bind10.somelib\" }]",
-                         "[ { \"name\": \"bind10.somelib\" } ]");
+                         "  { \"name\": \"test\" }]",
+                         "[ { \"name\": \"b10-test\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"test\" }]",
+                         "[ { \"name\": \"b10-test\" } ]");
+    doRelatedLoggersTest("[ { \"name\": \"test.somelib\" }]",
+                         "[ { \"name\": \"b10-test.somelib\" } ]");
     doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
-                         "  { \"name\": \"bind10.somelib\" }]",
-                         "[ { \"name\": \"bind10.somelib\" } ]");
+                         "  { \"name\": \"test.somelib\" }]",
+                         "[ { \"name\": \"b10-test.somelib\" } ]");
     doRelatedLoggersTest("[ { \"name\": \"other_module.somelib\" },"
-                         "  { \"name\": \"bind10\" },"
-                         "  { \"name\": \"bind10.somelib\" }]",
-                         "[ { \"name\": \"bind10\" },"
-                         "  { \"name\": \"bind10.somelib\" } ]");
+                         "  { \"name\": \"test\" },"
+                         "  { \"name\": \"test.somelib\" }]",
+                         "[ { \"name\": \"b10-test\" },"
+                         "  { \"name\": \"b10-test.somelib\" } ]");
     doRelatedLoggersTest("[ { \"name\": \"*\" }]",
-                         "[ { \"name\": \"bind10\" } ]");
+                         "[ { \"name\": \"b10-test\" } ]");
     doRelatedLoggersTest("[ { \"name\": \"*.somelib\" }]",
-                         "[ { \"name\": \"bind10.somelib\" } ]");
+                         "[ { \"name\": \"b10-test.somelib\" } ]");
     doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
-                         "  { \"name\": \"bind10\", \"severity\": \"WARN\"}]",
-                         "[ { \"name\": \"bind10\", \"severity\": \"WARN\"} ]");
+                         "  { \"name\": \"test\", \"severity\": \"WARN\"}]",
+                         "[ { \"name\": \"b10-test\", \"severity\": \"WARN\"} ]");
     doRelatedLoggersTest("[ { \"name\": \"*\", \"severity\": \"DEBUG\" },"
                          "  { \"name\": \"some_module\", \"severity\": \"WARN\"}]",
-                         "[ { \"name\": \"bind10\", \"severity\": \"DEBUG\"} ]");
-
+                         "[ { \"name\": \"b10-test\", \"severity\": \"DEBUG\"} ]");
+    doRelatedLoggersTest("[ { \"name\": \"b10-test\" }]",
+                         "[]");
     // make sure 'bad' things like '*foo.x' or '*lib' are ignored
     // (cfgmgr should have already caught it in the logconfig plugin
     // check, and is responsible for reporting the error)
@@ -696,8 +704,8 @@ TEST(LogConfigTest, relatedLoggersTest) {
                          "[ ]");
     doRelatedLoggersTest("[ { \"name\": \"*foo\" },"
                          "  { \"name\": \"*foo.lib\" },"
-                         "  { \"name\": \"bind10\" } ]",
-                         "[ { \"name\": \"bind10\" } ]");
+                         "  { \"name\": \"test\" } ]",
+                         "[ { \"name\": \"b10-test\" } ]");
 }
 
 }
diff --git a/src/lib/config/tests/module_spec_unittests.cc b/src/lib/config/tests/module_spec_unittests.cc
index 1b43350..b2ca7b4 100644
--- a/src/lib/config/tests/module_spec_unittests.cc
+++ b/src/lib/config/tests/module_spec_unittests.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2009  Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2009, 2011  Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -18,6 +18,8 @@
 
 #include <fstream>
 
+#include <boost/foreach.hpp>
+
 #include <config/tests/data_def_unittests_config.h>
 
 using namespace isc::data;
@@ -57,6 +59,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
 
     dd = moduleSpecFromFile(specfile("spec2.spec"));
     EXPECT_EQ("[ { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\", \"command_name\": \"print_message\" }, { \"command_args\": [  ], \"command_description\": \"Shut down BIND 10\", \"command_name\": \"shutdown\" } ]", dd.getCommandsSpec()->str());
+    EXPECT_EQ("[ { \"item_default\": \"1970-01-01T00:00:00Z\", \"item_description\": \"A dummy date time\", \"item_format\": \"date-time\", \"item_name\": \"dummy_time\", \"item_optional\": false, \"item_title\": \"Dummy Time\", \"item_type\": \"string\" } ]", dd.getStatisticsSpec()->str());
     EXPECT_EQ("Spec2", dd.getModuleName());
     EXPECT_EQ("", dd.getModuleDescription());
 
@@ -64,6 +67,11 @@ TEST(ModuleSpec, ReadingSpecfiles) {
     EXPECT_EQ("Spec25", dd.getModuleName());
     EXPECT_EQ("Just an empty module", dd.getModuleDescription());
     EXPECT_THROW(moduleSpecFromFile(specfile("spec26.spec")), ModuleSpecError);
+    EXPECT_THROW(moduleSpecFromFile(specfile("spec34.spec")), ModuleSpecError);
+    EXPECT_THROW(moduleSpecFromFile(specfile("spec35.spec")), ModuleSpecError);
+    EXPECT_THROW(moduleSpecFromFile(specfile("spec36.spec")), ModuleSpecError);
+    EXPECT_THROW(moduleSpecFromFile(specfile("spec37.spec")), ModuleSpecError);
+    EXPECT_THROW(moduleSpecFromFile(specfile("spec38.spec")), ModuleSpecError);
 
     std::ifstream file;
     file.open(specfile("spec1.spec").c_str());
@@ -71,6 +79,7 @@ TEST(ModuleSpec, ReadingSpecfiles) {
     EXPECT_EQ(dd.getFullSpec()->get("module_name")
                               ->stringValue(), "Spec1");
     EXPECT_TRUE(isNull(dd.getCommandsSpec()));
+    EXPECT_TRUE(isNull(dd.getStatisticsSpec()));
 
     std::ifstream file2;
     file2.open(specfile("spec8.spec").c_str());
@@ -114,6 +123,12 @@ TEST(ModuleSpec, SpecfileConfigData) {
                    "commands is not a list of elements");
 }
 
+TEST(ModuleSpec, SpecfileStatistics) {
+    moduleSpecError("spec36.spec", "item_default not valid type of item_format");
+    moduleSpecError("spec37.spec", "statistics is not a list of elements");
+    moduleSpecError("spec38.spec", "item_default not valid type of item_format");
+}
+
 TEST(ModuleSpec, SpecfileCommands) {
     moduleSpecError("spec17.spec",
                    "command_name missing in { \"command_args\": [ { \"item_default\": \"\", \"item_name\": \"message\", \"item_optional\": false, \"item_type\": \"string\" } ], \"command_description\": \"Print the given message to stdout\" }");
@@ -137,6 +152,17 @@ dataTest(const ModuleSpec& dd, const std::string& data_file_name) {
 }
 
 bool
+statisticsTest(const ModuleSpec& dd, const std::string& data_file_name) {
+    std::ifstream data_file;
+
+    data_file.open(specfile(data_file_name).c_str());
+    ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+    data_file.close();
+
+    return (dd.validateStatistics(data));
+}
+
+bool
 dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
                       ElementPtr errors)
 {
@@ -149,6 +175,19 @@ dataTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
     return (dd.validateConfig(data, true, errors));
 }
 
+bool
+statisticsTestWithErrors(const ModuleSpec& dd, const std::string& data_file_name,
+                      ElementPtr errors)
+{
+    std::ifstream data_file;
+
+    data_file.open(specfile(data_file_name).c_str());
+    ConstElementPtr data = Element::fromJSON(data_file, data_file_name);
+    data_file.close();
+
+    return (dd.validateStatistics(data, true, errors));
+}
+
 TEST(ModuleSpec, DataValidation) {
     ModuleSpec dd = moduleSpecFromFile(specfile("spec22.spec"));
 
@@ -175,6 +214,17 @@ TEST(ModuleSpec, DataValidation) {
     EXPECT_EQ("[ \"Unknown item value_does_not_exist\" ]", errors->str());
 }
 
+TEST(ModuleSpec, StatisticsValidation) {
+    ModuleSpec dd = moduleSpecFromFile(specfile("spec33.spec"));
+
+    EXPECT_TRUE(statisticsTest(dd, "data33_1.data"));
+    EXPECT_FALSE(statisticsTest(dd, "data33_2.data"));
+
+    ElementPtr errors = Element::createList();
+    EXPECT_FALSE(statisticsTestWithErrors(dd, "data33_2.data", errors));
+    EXPECT_EQ("[ \"Format mismatch\", \"Format mismatch\", \"Format mismatch\" ]", errors->str());
+}
+
 TEST(ModuleSpec, CommandValidation) {
     ModuleSpec dd = moduleSpecFromFile(specfile("spec2.spec"));
     ConstElementPtr arg = Element::fromJSON("{}");
@@ -211,3 +261,118 @@ TEST(ModuleSpec, CommandValidation) {
     EXPECT_EQ(errors->get(0)->stringValue(), "Type mismatch");
 
 }
+
+TEST(ModuleSpec, NamedSetValidation) {
+    ModuleSpec dd = moduleSpecFromFile(specfile("spec32.spec"));
+
+    ElementPtr errors = Element::createList();
+    EXPECT_TRUE(dataTestWithErrors(dd, "data32_1.data", errors));
+    EXPECT_FALSE(dataTest(dd, "data32_2.data"));
+    EXPECT_FALSE(dataTest(dd, "data32_3.data"));
+}
+
+TEST(ModuleSpec, CheckFormat) {
+
+    const std::string json_begin = "{ \"module_spec\": { \"module_name\": \"Foo\", \"statistics\": [ { \"item_name\": \"dummy_time\", \"item_type\": \"string\", \"item_optional\": true, \"item_title\": \"Dummy Time\", \"item_description\": \"A dummy date time\"";
+    const std::string json_end = " } ] } }";
+    std::string item_default;
+    std::string item_format;
+    std::vector<std::string> specs;
+    ConstElementPtr el;
+
+    specs.clear();
+    item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"2011-05-27\",";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"19:42:57\",";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_default + item_format);
+
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_format);
+    item_default = "";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_format);
+    item_default = "";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_format);
+
+    item_default = "\"item_default\": \"a\"";
+    specs.push_back("," + item_default);
+    item_default = "\"item_default\": \"b\"";
+    specs.push_back("," + item_default);
+    item_default = "\"item_default\": \"c\"";
+    specs.push_back("," + item_default);
+
+    item_format  = "\"item_format\": \"dummy\"";
+    specs.push_back("," + item_format);
+
+    specs.push_back("");
+
+    BOOST_FOREACH(std::string s, specs) {
+        el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+        EXPECT_NO_THROW(ModuleSpec(el, true));
+    }
+
+    specs.clear();
+    item_default = "\"item_default\": \"2011-05-27T19:42:57Z\",";
+    item_format  = "\"item_format\": \"dummy\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"2011-05-27\",";
+    item_format  = "\"item_format\": \"dummy\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"19:42:57Z\",";
+    item_format  = "\"item_format\": \"dummy\"";
+    specs.push_back("," + item_default + item_format);
+
+    item_default = "\"item_default\": \"2011-13-99T99:99:99Z\",";
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"2011-13-99\",";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"99:99:99Z\",";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_default + item_format);
+
+    item_default = "\"item_default\": \"1\",";
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"1\",";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"1\",";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_default + item_format);
+
+    item_default = "\"item_default\": \"\",";
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"\",";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_default + item_format);
+    item_default = "\"item_default\": \"\",";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_default + item_format);
+
+    // wrong date-time-type format not ending with "Z"
+    item_default = "\"item_default\": \"2011-05-27T19:42:57\",";
+    item_format  = "\"item_format\": \"date-time\"";
+    specs.push_back("," + item_default + item_format);
+    // wrong date-type format ending with "T"
+    item_default = "\"item_default\": \"2011-05-27T\",";
+    item_format  = "\"item_format\": \"date\"";
+    specs.push_back("," + item_default + item_format);
+    // wrong time-type format ending with "Z"
+    item_default = "\"item_default\": \"19:42:57Z\",";
+    item_format  = "\"item_format\": \"time\"";
+    specs.push_back("," + item_default + item_format);
+
+    BOOST_FOREACH(std::string s, specs) {
+        el = Element::fromJSON(json_begin + s + json_end)->get("module_spec");
+        EXPECT_THROW(ModuleSpec(el, true), ModuleSpecError);
+    }
+}
diff --git a/src/lib/config/tests/testdata/Makefile.am b/src/lib/config/tests/testdata/Makefile.am
index 57d1ed3..0d8b92e 100644
--- a/src/lib/config/tests/testdata/Makefile.am
+++ b/src/lib/config/tests/testdata/Makefile.am
@@ -22,6 +22,11 @@ EXTRA_DIST += data22_7.data
 EXTRA_DIST += data22_8.data
 EXTRA_DIST += data22_9.data
 EXTRA_DIST += data22_10.data
+EXTRA_DIST += data32_1.data
+EXTRA_DIST += data32_2.data
+EXTRA_DIST += data32_3.data
+EXTRA_DIST += data33_1.data
+EXTRA_DIST += data33_2.data
 EXTRA_DIST += spec1.spec
 EXTRA_DIST += spec2.spec
 EXTRA_DIST += spec3.spec
@@ -53,3 +58,10 @@ EXTRA_DIST += spec28.spec
 EXTRA_DIST += spec29.spec
 EXTRA_DIST += spec30.spec
 EXTRA_DIST += spec31.spec
+EXTRA_DIST += spec32.spec
+EXTRA_DIST += spec33.spec
+EXTRA_DIST += spec34.spec
+EXTRA_DIST += spec35.spec
+EXTRA_DIST += spec36.spec
+EXTRA_DIST += spec37.spec
+EXTRA_DIST += spec38.spec
diff --git a/src/lib/config/tests/testdata/data32_1.data b/src/lib/config/tests/testdata/data32_1.data
new file mode 100644
index 0000000..5695b52
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_1.data
@@ -0,0 +1,3 @@
+{
+    "named_set_item": { "foo": 1, "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_2.data b/src/lib/config/tests/testdata/data32_2.data
new file mode 100644
index 0000000..d5b9765
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_2.data
@@ -0,0 +1,3 @@
+{
+    "named_set_item": { "foo": "wrongtype", "bar": 2 }
+}
diff --git a/src/lib/config/tests/testdata/data32_3.data b/src/lib/config/tests/testdata/data32_3.data
new file mode 100644
index 0000000..85f32fe
--- /dev/null
+++ b/src/lib/config/tests/testdata/data32_3.data
@@ -0,0 +1,3 @@
+{
+    "named_set_item": []
+}
diff --git a/src/lib/config/tests/testdata/data33_1.data b/src/lib/config/tests/testdata/data33_1.data
new file mode 100644
index 0000000..429852c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_1.data
@@ -0,0 +1,7 @@
+{
+    "dummy_str": "Dummy String",
+    "dummy_int": 118,
+    "dummy_datetime": "2011-05-27T19:42:57Z",
+    "dummy_date": "2011-05-27",
+    "dummy_time": "19:42:57"
+}
diff --git a/src/lib/config/tests/testdata/data33_2.data b/src/lib/config/tests/testdata/data33_2.data
new file mode 100644
index 0000000..eb0615c
--- /dev/null
+++ b/src/lib/config/tests/testdata/data33_2.data
@@ -0,0 +1,7 @@
+{
+    "dummy_str": "Dummy String",
+    "dummy_int": 118,
+    "dummy_datetime": "xxxx",
+    "dummy_date": "xxxx",
+    "dummy_time": "xxxx"
+}
diff --git a/src/lib/config/tests/testdata/spec2.spec b/src/lib/config/tests/testdata/spec2.spec
index 59b8ebc..4352422 100644
--- a/src/lib/config/tests/testdata/spec2.spec
+++ b/src/lib/config/tests/testdata/spec2.spec
@@ -66,6 +66,17 @@
         "command_description": "Shut down BIND 10",
         "command_args": []
       }
+    ],
+    "statistics": [
+      {
+        "item_name": "dummy_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Dummy Time",
+        "item_description": "A dummy date time",
+        "item_format": "date-time"
+      }
     ]
   }
 }
diff --git a/src/lib/config/tests/testdata/spec32.spec b/src/lib/config/tests/testdata/spec32.spec
new file mode 100644
index 0000000..0d8cf7c
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec32.spec
@@ -0,0 +1,40 @@
+{
+  "module_spec": {
+    "module_name": "Spec32",
+    "config_data": [
+      { "item_name": "named_set_item",
+        "item_type": "named_set",
+        "item_optional": false,
+        "item_default": { "a": 1, "b": 2 },
+        "named_set_item_spec": {
+          "item_name": "named_set_element",
+          "item_type": "integer",
+          "item_optional": false,
+          "item_default": 3
+        }
+      },
+      { "item_name": "named_set_item2",
+        "item_type": "named_set",
+        "item_optional": true,
+        "item_default": { },
+        "named_set_item_spec": {
+          "item_name": "named_set_element",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+          { "item_name": "first",
+            "item_type": "integer",
+            "item_optional": true
+          },
+          { "item_name": "second",
+            "item_type": "string",
+            "item_optional": true
+          }
+          ]
+        }
+      }
+    ]
+  }
+}
+
diff --git a/src/lib/config/tests/testdata/spec33.spec b/src/lib/config/tests/testdata/spec33.spec
new file mode 100644
index 0000000..3002488
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec33.spec
@@ -0,0 +1,50 @@
+{
+  "module_spec": {
+    "module_name": "Spec33",
+    "statistics": [
+      {
+        "item_name": "dummy_str",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "Dummy",
+        "item_title": "Dummy String",
+        "item_description": "A dummy string"
+      },
+      {
+        "item_name": "dummy_int",
+        "item_type": "integer",
+        "item_optional": false,
+        "item_default": 0,
+        "item_title": "Dummy Integer",
+        "item_description": "A dummy integer"
+      },
+      {
+        "item_name": "dummy_datetime",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01T00:00:00Z",
+        "item_title": "Dummy DateTime",
+        "item_description": "A dummy datetime",
+        "item_format": "date-time"
+      },
+      {
+        "item_name": "dummy_date",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "1970-01-01",
+        "item_title": "Dummy Date",
+        "item_description": "A dummy date",
+        "item_format": "date"
+      },
+      {
+        "item_name": "dummy_time",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "00:00:00",
+        "item_title": "Dummy Time",
+        "item_description": "A dummy time",
+        "item_format": "time"
+      }
+    ]
+  }
+}
diff --git a/src/lib/config/tests/testdata/spec34.spec b/src/lib/config/tests/testdata/spec34.spec
new file mode 100644
index 0000000..dd1f3ca
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec34.spec
@@ -0,0 +1,14 @@
+{
+  "module_spec": {
+    "module_name": "Spec34",
+    "statistics": [
+      {
+        "item_name": "dummy_str",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "Dummy",
+        "item_description": "A dummy string"
+      }
+    ]
+  }
+}
diff --git a/src/lib/config/tests/testdata/spec35.spec b/src/lib/config/tests/testdata/spec35.spec
new file mode 100644
index 0000000..86aaf14
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec35.spec
@@ -0,0 +1,15 @@
+{
+  "module_spec": {
+    "module_name": "Spec35",
+    "statistics": [
+      {
+        "item_name": "dummy_str",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "Dummy",
+        "item_title": "Dummy String"
+      }
+    ]
+  }
+}
+
diff --git a/src/lib/config/tests/testdata/spec36.spec b/src/lib/config/tests/testdata/spec36.spec
new file mode 100644
index 0000000..fb9ce26
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec36.spec
@@ -0,0 +1,17 @@
+{
+  "module_spec": {
+    "module_name": "Spec36",
+    "statistics": [
+      {
+        "item_name": "dummy_str",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "Dummy",
+        "item_title": "Dummy String",
+        "item_description": "A dummy string",
+        "item_format": "dummy"
+      }
+    ]
+  }
+}
+
diff --git a/src/lib/config/tests/testdata/spec37.spec b/src/lib/config/tests/testdata/spec37.spec
new file mode 100644
index 0000000..bc444d1
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec37.spec
@@ -0,0 +1,7 @@
+{
+  "module_spec": {
+    "module_name": "Spec37",
+    "statistics": 8
+  }
+}
+
diff --git a/src/lib/config/tests/testdata/spec38.spec b/src/lib/config/tests/testdata/spec38.spec
new file mode 100644
index 0000000..1892e88
--- /dev/null
+++ b/src/lib/config/tests/testdata/spec38.spec
@@ -0,0 +1,17 @@
+{
+  "module_spec": {
+    "module_name": "Spec38",
+    "statistics": [
+      {
+        "item_name": "dummy_datetime",
+        "item_type": "string",
+        "item_optional": false,
+        "item_default": "11",
+        "item_title": "Dummy DateTime",
+        "item_description": "A dummy datetime",
+        "item_format": "date-time"
+      }
+    ]
+  }
+}
+
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index 457d5b0..b6c314c 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,7 +7,13 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
+pkglibexecdir = $(libexecdir)/@PACKAGE@/backends
+
+datasrc_config.h: datasrc_config.h.pre
+	$(SED) -e "s|@@PKGLIBEXECDIR@@|$(pkglibexecdir)|" datasrc_config.h.pre >$@
+
 CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
+CLEANFILES += datasrc_config.h
 
 lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
@@ -17,18 +23,35 @@ libdatasrc_la_SOURCES += query.h query.cc
 libdatasrc_la_SOURCES += cache.h cache.cc
 libdatasrc_la_SOURCES += rbtree.h
 libdatasrc_la_SOURCES += zonetable.h zonetable.cc
-libdatasrc_la_SOURCES += memory_datasrc.h memory_datasrc.cc
 libdatasrc_la_SOURCES += zone.h
 libdatasrc_la_SOURCES += result.h
 libdatasrc_la_SOURCES += logger.h logger.cc
+libdatasrc_la_SOURCES += client.h iterator.h
+libdatasrc_la_SOURCES += database.h database.cc
+libdatasrc_la_SOURCES += factory.h factory.cc
 nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
+pkglibexec_LTLIBRARIES =  sqlite3_ds.la memory_ds.la
+
+sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
+sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LDFLAGS += -no-undefined -version-info 1:0:0
+sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+sqlite3_ds_la_LIBADD += libdatasrc.la
+sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
+
+memory_ds_la_SOURCES = memory_datasrc.h memory_datasrc.cc
+memory_ds_la_LDFLAGS = -module
+memory_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
+memory_ds_la_LIBADD += libdatasrc.la
+
 libdatasrc_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/dns/libdns++.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
+libdatasrc_la_LIBADD += $(SQLITE_LIBS)
 
-BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
 datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
 	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
new file mode 100644
index 0000000..24c8850
--- /dev/null
+++ b/src/lib/datasrc/client.h
@@ -0,0 +1,371 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_CLIENT_H
+#define __DATA_SOURCE_CLIENT_H 1
+
+#include <utility>
+
+#include <boost/noncopyable.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <datasrc/zone.h>
+
+/// \file
+/// Datasource clients
+///
+/// The data source client API is specified in client.h, and provides the
+/// functionality to query and modify data in the data sources. There are
+/// multiple datasource implementations, and by subclassing DataSourceClient or
+/// DatabaseClient, more can be added.
+///
+/// All datasources are implemented as loadable modules, with a name of the
+/// form "<type>_ds.so". This has been chosen intentionally, to minimize
+/// confusion and potential mistakes.
+///
+/// In order to use a datasource client backend, the class
+/// DataSourceClientContainer is provided in factory.h; this will load the
+/// library, set up the instance, and clean everything up once it is destroyed.
+///
+/// Access to the actual instance is provided with the getInstance() method
+/// in DataSourceClientContainer
+///
+/// \note Depending on actual usage, we might consider making the container
+/// a transparent abstraction layer, so it can be used as a DataSourceClient
+/// directly. This has some other implications though so for now the only access
+/// provided is through getInstance()).
+///
+/// For datasource backends, we use a dynamically loaded library system (with
+/// dlopen()). This library must contain the following things;
+/// - A subclass of DataSourceClient or DatabaseClient (which itself is a
+///   subclass of DataSourceClient)
+/// - A creator function for an instance of that subclass, of the form:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+/// \endcode
+/// - A destructor for said instance, of the form:
+/// \code
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+///
+/// See the documentation for the \link DataSourceClient \endlink class for
+/// more information on implementing subclasses of it.
+///
+
+namespace isc {
+namespace datasrc {
+
+// The iterator.h is not included on purpose, most application won't need it
+class ZoneIterator;
+typedef boost::shared_ptr<ZoneIterator> ZoneIteratorPtr;
+
+/// \brief The base class of data source clients.
+///
+/// This is an abstract base class that defines the common interface for
+/// various types of data source clients.  A data source client is a top level
+/// access point to a data source, allowing various operations on the data
+/// source such as lookups, traversing or updates.  The client class itself
+/// has limited focus and delegates the responsibility for these specific
+/// operations to other classes; in general methods of this class act as
+/// factories of these other classes.
+///
+/// See \link datasrc/client.h datasrc/client.h \endlink for more information
+/// on adding datasource implementations.
+///
+/// The following derived classes are currently (expected to be) provided:
+/// - \c InMemoryClient: A client of a conceptual data source that stores
+/// all necessary data in memory for faster lookups
+/// - \c DatabaseClient: A client that uses a real database backend (such as
+/// an SQL database).  It would internally hold a connection to the underlying
+/// database system.
+///
+/// \note It is intentional that the names of these derived classes don't
+/// contain "DataSource", unlike their base class.  It's also noteworthy
+/// that the naming of the base class is somewhat redundant because the
+/// namespace \c datasrc would indicate that it's related to a data source.
+/// The redundant naming comes from the observation that namespaces are
+/// often omitted with \c using directives, in which case "Client"
+/// would be too generic.  On the other hand, concrete derived classes are
+/// generally not expected to be referenced directly from other modules and
+/// applications, so we'll give them more concise names such as InMemoryClient.
+///
+/// A single \c DataSourceClient object is expected to handle only a single
+/// RR class even if the underlying data source contains records for multiple
+/// RR classes.  Likewise, (when we support views) a \c DataSourceClient
+/// object is expected to handle only a single view.
+///
+/// If the application uses multiple threads, each thread will need to
+/// create and use a separate DataSourceClient.  This is because some
+/// database backend doesn't allow multiple threads to share the same
+/// connection to the database.
+///
+/// \note For a client using an in memory backend, this may result in
+/// having a multiple copies of the same data in memory, increasing the
+/// memory footprint substantially.  Depending on how to support multiple
+/// CPU cores for concurrent lookups on the same single data source (which
+/// is not fully fixed yet, and for which multiple threads may be used),
+/// this design may have to be revisited.
+///
+/// This class (and therefore its derived classes) are not copyable.
+/// This is because the derived classes would generally contain attributes
+/// that are not easy to copy (such as a large size of in memory data or a
+/// network connection to a database server).  In order to avoid a surprising
+/// disruption with a naive copy it's prohibited explicitly.  For the expected
+/// usage of the client classes the restriction should be acceptable.
+///
+/// \todo This class is still not complete. It will need more factory methods,
+/// e.g. for (re)loading a zone.
+class DataSourceClient : boost::noncopyable {
+public:
+    /// \brief A helper structure to represent the search result of
+    /// \c find().
+    ///
+    /// This is a straightforward pair of the result code and a shared pointer
+    /// to the found zone to represent the result of \c find().
+    /// We use this in order to avoid overloading the return value for both
+    /// the result code ("success" or "not found") and the found object,
+    /// i.e., avoid using \c NULL to mean "not found", etc.
+    ///
+    /// This is a simple value class with no internal state, so for
+    /// convenience we allow the applications to refer to the members
+    /// directly.
+    ///
+    /// See the description of \c find() for the semantics of the member
+    /// variables.
+    struct FindResult {
+        FindResult(result::Result param_code,
+                   const ZoneFinderPtr param_zone_finder) :
+            code(param_code), zone_finder(param_zone_finder)
+        {}
+        const result::Result code;
+        const ZoneFinderPtr zone_finder;
+    };
+
+    ///
+    /// \name Constructors and Destructor.
+    ///
+protected:
+    /// Default constructor.
+    ///
+    /// This is intentionally defined as protected as this base class
+    /// should never be instantiated directly.
+    ///
+    /// The constructor of a concrete derived class may throw an exception.
+    /// This interface does not specify which exceptions can happen (at least
+    /// at this moment), and the caller should expect any type of exception
+    /// and react accordingly.
+    DataSourceClient() {}
+
+public:
+    /// The destructor.
+    virtual ~DataSourceClient() {}
+    //@}
+
+    /// Returns a \c ZoneFinder for a zone that best matches the given name.
+    ///
+    /// A concrete derived version of this method gets access to its backend
+    /// data source to search for a zone whose origin gives the longest match
+    /// against \c name.  It returns the search result in the form of a
+    /// \c FindResult object as follows:
+    /// - \c code: The result code of the operation.
+    ///   - \c result::SUCCESS: A zone that gives an exact match is found
+    ///   - \c result::PARTIALMATCH: A zone whose origin is a
+    ///   super domain of \c name is found (but there is no exact match)
+    ///   - \c result::NOTFOUND: For all other cases.
+    /// - \c zone_finder: Pointer to a \c ZoneFinder object for the found zone
+    /// if one is found; otherwise \c NULL.
+    ///
+    /// A specific derived version of this method may throw an exception.
+    /// This interface does not specify which exceptions can happen (at least
+    /// at this moment), and the caller should expect any type of exception
+    /// and react accordingly.
+    ///
+    /// \param name A domain name for which the search is performed.
+    /// \return A \c FindResult object enclosing the search result (see above).
+    virtual FindResult findZone(const isc::dns::Name& name) const = 0;
+
+    /// \brief Returns an iterator to the given zone
+    ///
+    /// This allows for traversing the whole zone. The returned object can
+    /// provide the RRsets one by one.
+    ///
+    /// This throws DataSourceError when the zone does not exist in the
+    /// datasource.
+    ///
+    /// The default implementation throws isc::NotImplemented. This allows
+    /// for easy and fast deployment of minimal custom data sources, where
+    /// the user/implementer doesn't have to care about anything else but
+    /// the actual queries. Also, in some cases, it isn't possible to traverse
+    /// the zone from logic point of view (eg. dynamically generated zone
+    /// data).
+    ///
+    /// It is not fixed if a concrete implementation of this method can throw
+    /// anything else.
+    ///
+    /// \param name The name of the zone apex to be traversed. It does not
+    ///     do a nearest match, unlike findZone.
+    /// \param separate_rrs If true, the iterator will return each RR as a
+    ///                     new RRset object. If false, the iterator will
+    ///                     combine consecutive RRs with the name and type
+    ///                     into 1 RRset. The capitalization of the RRset will
+    ///                     be that of the first RR read, and TTLs will be
+    ///                     adjusted to the lowest one found.
+    /// \return Pointer to the iterator.
+    virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+                                        bool separate_rrs = false) const {
+        // This is here to both document the parameter in doxygen (therefore it
+        // needs a name) and avoid unused parameter warning.
+        static_cast<void>(name);
+        static_cast<void>(separate_rrs);
+
+        isc_throw(isc::NotImplemented,
+                  "Data source doesn't support iteration");
+    }
+
+    /// Return an updater to make updates to a specific zone.
+    ///
+    /// The RR class of the zone is the one that the client is expected to
+    /// handle (see the detailed description of this class).
+    ///
+    /// If the specified zone is not found via the client, a NULL pointer
+    /// will be returned; in other words a completely new zone cannot be
+    /// created using an updater.  It must be created beforehand (even if
+    /// it's an empty placeholder) in a way specific to the underlying data
+    /// source.
+    ///
+    /// Conceptually, the updater will trigger a separate transaction for
+    /// subsequent updates to the zone within the context of the updater
+    /// (the actual implementation of the "transaction" may vary for the
+    /// specific underlying data source).  Until \c commit() is performed
+    /// on the updater, the intermediate updates won't affect the results
+    /// of other methods (and the result of the object's methods created
+    /// by other factory methods).  Likewise, if the updater is destructed
+    /// without performing \c commit(), the intermediate updates will be
+    /// effectively canceled and will never affect other methods.
+    ///
+    /// If the underlying data source allows concurrent updates, this method
+    /// can be called multiple times while the previously returned updater(s)
+    /// are still active.  In this case each updater triggers a different
+    /// "transaction".  Normally it would be for different zones for such a
+    /// case as handling multiple incoming AXFR streams concurrently, but
+    /// this interface does not even prohibit an attempt of getting more than
+    /// one updater for the same zone, as long as the underlying data source
+    /// allows such an operation (and any conflict resolution is left to the
+    /// specific derived class implementation).
+    ///
+    /// If \c replace is true, any existing RRs of the zone will be
+    /// deleted on successful completion of updates (after \c commit() on
+    /// the updater); if it's false, the existing RRs will be
+    /// intact unless explicitly deleted by \c deleteRRset() on the updater.
+    ///
+    /// A data source can be "read only" or can prohibit partial updates.
+    /// In such cases this method will result in an \c isc::NotImplemented
+    /// exception (unconditionally, or when \c replace is false).
+    ///
+    /// If \c journaling is true, the data source should store a journal
+    /// of changes. These can be used later on by, for example, IXFR-out.
+    /// However, the parameter is a hint only. It might be unable to store
+    /// them and they would be silently discarded. Or it might need to
+    /// store them no matter what (for example a git-based data source would
+    /// store journal implicitly). When the \c journaling is true, it
+    /// requires that the following update be formatted as IXFR transfer
+    /// (SOA to be removed, bunch of RRs to be removed, SOA to be added,
+    /// bunch of RRs to be added, and possibly repeated). However, it is not
+    /// required that the updater checks that. If it is false, it must not
+    /// require so and must accept any order of changes.
+    ///
+    /// We don't support erasing the whole zone (by replace being true) and
+    /// saving a journal at the same time. In such situation, BadValue is
+    /// thrown.
+    ///
+    /// \note To avoid throwing the exception accidentally with a lazy
+    /// implementation, we still keep this method pure virtual without
+    /// an implementation.  All derived classes must explicitly define this
+    /// method, even if it simply throws the NotImplemented exception.
+    ///
+    /// \exception NotImplemented The underlying data source does not support
+    /// updates.
+    /// \exception DataSourceError Internal error in the underlying data
+    /// source.
+    /// \exception std::bad_alloc Resource allocation failure.
+    /// \exception BadValue if both replace and journaling are true.
+    ///
+    /// \param name The zone name to be updated
+    /// \param replace Whether to delete existing RRs before making updates
+    /// \param journaling The zone updater should store a journal of the
+    ///     changes.
+    ///
+    /// \return A pointer to the updater; it will be NULL if the specified
+    /// zone isn't found.
+    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+                                      bool replace, bool journaling = false)
+        const = 0;
+
+    /// Return a journal reader to retrieve differences of a zone.
+    ///
+    /// A derived version of this method creates a concrete
+    /// \c ZoneJournalReader object specific to the underlying data source
+    /// for the specified name of zone and differences between the versions
+    /// specified by the beginning and ending serials of the corresponding
+    /// SOA RRs.
+    /// The RR class of the zone is the one that the client is expected to
+    /// handle (see the detailed description of this class).
+    ///
+    /// Note that the SOA serials are compared by the semantics of the serial
+    /// number arithmetic.  So, for example, \c begin_serial can be larger than
+    /// \c end_serial as bare unsigned integers.  The underlying data source
+    /// implementation is assumed to keep track of sufficient history to
+    /// identify (if exist) the corresponding difference between the specified
+    /// versions.
+    ///
+    /// This method returns the result as a pair of a result code and
+    /// a pointer to a \c ZoneJournalReader object.  On success, the result
+    /// code is \c SUCCESS and the pointer must be non NULL; otherwise
+    /// the result code is something other than \c SUCCESS and the pointer
+    /// must be NULL.
+    ///
+    /// If the specified zone is not found in the data source, the result
+    /// code is \c NO_SUCH_ZONE.
+    /// Otherwise, if specified range of difference for the zone is not found
+    /// in the data source, the result code is \c NO_SUCH_VERSION.
+    ///
+    /// Handling differences is an optional feature of data source.
+    /// If the underlying data source does not support difference handling,
+    /// this method for that type of data source can throw an exception of
+    /// class \c NotImplemented.
+    ///
+    /// \exception NotImplemented The data source does not support differences.
+    /// \exception DataSourceError Other operational errors at the data source
+    /// level.
+    ///
+    /// \param zone The name of the zone for which the difference should be
+    /// retrieved.
+    /// \param begin_serial The SOA serial of the beginning version of the
+    /// differences.
+    /// \param end_serial The SOA serial of the ending version of the
+    /// differences.
+    ///
+    /// \return A pair of result code and a pointer to \c ZoneJournalReader.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const = 0;
+};
+}
+}
+#endif  // DATA_SOURCE_CLIENT_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index ff695da..c35f0d3 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -53,6 +53,18 @@ public:
         isc::Exception(file, line, what) {}
 };
 
+/// \brief No such serial number when obtaining difference iterator
+///
+/// Thrown if either the zone/start serial number or zone/end serial number
+/// combination does not exist in the differences table.  (Note that this
+/// includes the case where the differences table contains no records related
+/// to that zone.)
+class NoSuchSerial : public DataSourceError {
+public:
+    /// \brief Constructor; arguments are forwarded unchanged to
+    /// \c DataSourceError (source file, line and error description).
+    NoSuchSerial(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
 
 class AbstractDataSrc {
     ///
@@ -184,9 +196,9 @@ public:
     void setClass(isc::dns::RRClass& c) { rrclass = c; }
     void setClass(const isc::dns::RRClass& c) { rrclass = c; }
 
-    Result init() { return (NOT_IMPLEMENTED); }
-    Result init(isc::data::ConstElementPtr config);
-    Result close() { return (NOT_IMPLEMENTED); }
+    virtual Result init() { return (NOT_IMPLEMENTED); }
+    virtual Result init(isc::data::ConstElementPtr config);
+    virtual Result close() { return (NOT_IMPLEMENTED); }
 
     virtual Result findRRset(const isc::dns::Name& qname,
                              const isc::dns::RRClass& qclass,
@@ -351,7 +363,7 @@ public:
 
     /// \brief Returns the best enclosing zone name found for the given
     // name and RR class so far.
-    /// 
+    ///
     /// \return A pointer to the zone apex \c Name, NULL if none found yet.
     ///
     /// This method never throws an exception.
@@ -413,6 +425,6 @@ private:
 
 #endif
 
-// Local Variables: 
+// Local Variables:
 // mode: c++
-// End: 
+// End:
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
new file mode 100644
index 0000000..45ce0c2
--- /dev/null
+++ b/src/lib/datasrc/database.cc
@@ -0,0 +1,1174 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <utility>
+#include <vector>
+
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+
+#include <exceptions/exceptions.h>
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <datasrc/data_source.h>
+#include <datasrc/logger.h>
+
+#include <boost/foreach.hpp>
+
+using namespace isc::dns;
+using namespace std;
+using boost::shared_ptr;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace datasrc {
+
+// Construct a client for the given RR class on top of the given accessor.
+//
+// The accessor supplies all low-level database operations.  It must be
+// non-NULL; otherwise isc::InvalidParameter is thrown, so the rest of the
+// class can rely on accessor_ being usable.
+DatabaseClient::DatabaseClient(RRClass rrclass,
+                               boost::shared_ptr<DatabaseAccessor>
+                               accessor) :
+    rrclass_(rrclass), accessor_(accessor)
+{
+    if (!accessor_) {
+        isc_throw(isc::InvalidParameter,
+                  "No database provided to DatabaseClient");
+    }
+}
+
+// Find the zone best matching the given name.
+//
+// First tries an exact match against the accessor's zone table
+// (result::SUCCESS).  Failing that, it strips leading labels one at a
+// time and retries, so the longest enclosing zone wins
+// (result::PARTIALMATCH).  If no enclosing zone exists either, it
+// returns result::NOTFOUND with a NULL finder.
+DataSourceClient::FindResult
+DatabaseClient::findZone(const Name& name) const {
+    std::pair<bool, int> zone(accessor_->getZone(name.toText()));
+    // Try exact first
+    if (zone.first) {
+        return (FindResult(result::SUCCESS,
+                           ZoneFinderPtr(new Finder(accessor_,
+                                                    zone.second, name))));
+    }
+    // Then super domains
+    // Start from 1, as 0 is covered above
+    for (size_t i(1); i < name.getLabelCount(); ++i) {
+        isc::dns::Name superdomain(name.split(i));
+        zone = accessor_->getZone(superdomain.toText());
+        if (zone.first) {
+            return (FindResult(result::PARTIALMATCH,
+                               ZoneFinderPtr(new Finder(accessor_,
+                                                        zone.second,
+                                                        superdomain))));
+        }
+    }
+    // No, really nothing
+    return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+}
+
+// Finder for a single zone: remembers the accessor to query through,
+// the zone's database id (as returned by getZone()) and the zone apex
+// (origin) name.  Performs no work itself.
+DatabaseClient::Finder::Finder(boost::shared_ptr<DatabaseAccessor> accessor,
+                               int zone_id, const isc::dns::Name& origin) :
+    accessor_(accessor),
+    zone_id_(zone_id),
+    origin_(origin)
+{ }
+
+namespace {
+// Adds the given Rdata to the given RRset.
+//
+// If the rrset is an empty pointer, a new one is created with the given
+// name, class, type and ttl.  When the rrset already exists, the type
+// must match -- enforced by assert(), since a mismatch would be a bug in
+// the caller (find()), not bad data -- while the name is not checked.
+// A differing TTL is tolerated: the RRset keeps the lowest TTL seen so
+// far, and a warning is logged.
+//
+// Raises a DataSourceError if the given rdata string does not parse
+// correctly for the given type and class.
+//
+// The DatabaseAccessor is passed only so the database name can appear
+// in the log message if the TTL is modified.
+void addOrCreate(isc::dns::RRsetPtr& rrset,
+                    const isc::dns::Name& name,
+                    const isc::dns::RRClass& cls,
+                    const isc::dns::RRType& type,
+                    const isc::dns::RRTTL& ttl,
+                    const std::string& rdata_str,
+                    const DatabaseAccessor& db
+                )
+{
+    if (!rrset) {
+        rrset.reset(new isc::dns::RRset(name, cls, type, ttl));
+    } else {
+        // This is a check to make sure find() is not messing things up
+        assert(type == rrset->getType());
+        if (ttl != rrset->getTTL()) {
+            if (ttl < rrset->getTTL()) {
+                rrset->setTTL(ttl);
+            }
+            logger.warn(DATASRC_DATABASE_FIND_TTL_MISMATCH)
+                .arg(db.getDBName()).arg(name).arg(cls)
+                .arg(type).arg(rrset->getTTL());
+        }
+    }
+    try {
+        rrset->addRdata(isc::dns::rdata::createRdata(type, cls, rdata_str));
+    } catch (const isc::dns::rdata::InvalidRdataText& ivrt) {
+        // at this point, rrset may have been initialised for no reason,
+        // and won't be used. But the caller would drop the shared_ptr
+        // on such an error anyway, so we don't care.
+        isc_throw(DataSourceError,
+                    "bad rdata in database for " << name << " "
+                    << type << ": " << ivrt.what());
+    }
+}
+
+// This class keeps a short-lived store of RRSIG records encountered
+// during a call to find(). If the backend happens to return signatures
+// before the actual data, we might not know which signatures we will
+// need. So if they may be relevant, we store them in this class.
+//
+// (If this class seems useful in other places, we might want to move
+// it to util. That would also provide an opportunity to add unit tests)
+class RRsigStore {
+public:
+    // Adds the given signature Rdata to the store
+    // The signature rdata MUST be of the RRSIG rdata type
+    // (the caller must make sure of this).
+    // NOTE: if we move this class to a public namespace,
+    // we should add a type_covered argument, so as not
+    // to have to do this cast here.
+    void addSig(isc::dns::rdata::RdataPtr sig_rdata) {
+        const isc::dns::RRType& type_covered =
+            static_cast<isc::dns::rdata::generic::RRSIG*>(
+                sig_rdata.get())->typeCovered();
+        sigs[type_covered].push_back(sig_rdata);
+    }
+
+    // If the store contains signatures for the type of the given
+    // rrset, they are appended to it.
+    void appendSignatures(isc::dns::RRsetPtr& rrset) const {
+        std::map<isc::dns::RRType,
+                 std::vector<isc::dns::rdata::RdataPtr> >::const_iterator
+            found = sigs.find(rrset->getType());
+        if (found != sigs.end()) {
+            BOOST_FOREACH(isc::dns::rdata::RdataPtr sig, found->second) {
+                rrset->addRRsig(sig);
+            }
+        }
+    }
+
+private:
+    // Collected signatures, keyed by the RR type they cover.
+    std::map<isc::dns::RRType, std::vector<isc::dns::rdata::RdataPtr> > sigs;
+};
+}
+
+// Fetch the RRsets of the requested types stored at 'name'.
+//
+// Makes a single pass over all records at 'name' via the accessor's
+// iterator context.  Records whose type is in 'types' are collected into
+// the result map; RRSIG records are buffered in an RRsigStore and
+// appended to the matching RRsets at the end, since the backend may
+// return signatures before the data they cover.  If 'construct_name' is
+// non-NULL, the resulting RRsets are built under that name instead of
+// 'name' (used when expanding wildcards).
+//
+// While scanning it also sanity-checks the node: a CNAME must not
+// coexist with other non-DNSSEC data and, when 'check_ns' is true,
+// neither may NS; violations raise DataSourceError, as do unparsable
+// type/TTL/rdata columns.
+//
+// Returns a pair of (was any record found at the name at all,
+// map of RR type to the collected RRset).
+DatabaseClient::Finder::FoundRRsets
+DatabaseClient::Finder::getRRsets(const string& name, const WantedTypes& types,
+                                  bool check_ns, const string* construct_name)
+{
+    RRsigStore sig_store;
+    bool records_found = false;
+    std::map<RRType, RRsetPtr> result;
+
+    // Request the context
+    DatabaseAccessor::IteratorContextPtr
+        context(accessor_->getRecords(name, zone_id_));
+    // It must not return NULL, that's a bug of the implementation
+    if (!context) {
+        isc_throw(isc::Unexpected, "Iterator context null at " + name);
+    }
+
+    std::string columns[DatabaseAccessor::COLUMN_COUNT];
+    if (construct_name == NULL) {
+        construct_name = &name;
+    }
+
+    const Name construct_name_object(*construct_name);
+
+    // Flags for the CNAME/NS coexistence checks below.
+    bool seen_cname(false);
+    bool seen_ds(false);
+    bool seen_other(false);
+    bool seen_ns(false);
+
+    while (context->getNext(columns)) {
+        // The domain is not empty
+        records_found = true;
+
+        try {
+            const RRType cur_type(columns[DatabaseAccessor::TYPE_COLUMN]);
+
+            if (cur_type == RRType::RRSIG()) {
+                // If we get signatures before we get the actual data, we
+                // can't know which ones to keep and which to drop...
+                // So we keep a separate store of any signature that may be
+                // relevant and add them to the final RRset when we are
+                // done.
+                // A possible optimization here is to not store them for
+                // types we are certain we don't need
+                sig_store.addSig(rdata::createRdata(cur_type, getClass(),
+                     columns[DatabaseAccessor::RDATA_COLUMN]));
+            }
+
+            if (types.find(cur_type) != types.end()) {
+                // This type is requested, so put it into result
+                const RRTTL cur_ttl(columns[DatabaseAccessor::TTL_COLUMN]);
+                // The sigtype column was an optimization for finding the
+                // relevant RRSIG RRs for a lookup. Currently this column is
+                // not used in this revised datasource implementation. We
+                // should either start using it again, or remove it from use
+                // completely (i.e. also remove it from the schema and the
+                // backend implementation).
+                // Note that because we don't use it now, we also won't notice
+                // it if the value is wrong (i.e. if the sigtype column
+                // contains an rrtype that is different from the actual value
+                // of the 'type covered' field in the RRSIG Rdata).
+                //cur_sigtype(columns[SIGTYPE_COLUMN]);
+                addOrCreate(result[cur_type], construct_name_object,
+                            getClass(), cur_type, cur_ttl,
+                            columns[DatabaseAccessor::RDATA_COLUMN],
+                            *accessor_);
+            }
+
+            if (cur_type == RRType::CNAME()) {
+                seen_cname = true;
+            } else if (cur_type == RRType::NS()) {
+                seen_ns = true;
+            } else if (cur_type == RRType::DS()) {
+                seen_ds = true;
+            } else if (cur_type != RRType::RRSIG() &&
+                       cur_type != RRType::NSEC3() &&
+                       cur_type != RRType::NSEC()) {
+                // NSEC and RRSIG can coexist with anything, otherwise
+                // we've seen something that can't live together with potential
+                // CNAME or NS
+                //
+                // NSEC3 lives in separate namespace from everything, therefore
+                // we just ignore it here for these checks as well.
+                seen_other = true;
+            }
+        } catch (const InvalidRRType&) {
+            isc_throw(DataSourceError, "Invalid RRType in database for " <<
+                      name << ": " << columns[DatabaseAccessor::
+                      TYPE_COLUMN]);
+        } catch (const InvalidRRTTL&) {
+            isc_throw(DataSourceError, "Invalid TTL in database for " <<
+                      name << ": " << columns[DatabaseAccessor::
+                      TTL_COLUMN]);
+        } catch (const rdata::InvalidRdataText&) {
+            isc_throw(DataSourceError, "Invalid rdata in database for " <<
+                      name << ": " << columns[DatabaseAccessor::
+                      RDATA_COLUMN]);
+        }
+    }
+    if (seen_cname && (seen_other || seen_ns || seen_ds)) {
+        isc_throw(DataSourceError, "CNAME shares domain " << name <<
+                  " with something else");
+    }
+    if (check_ns && seen_ns && seen_other) {
+        isc_throw(DataSourceError, "NS shares domain " << name <<
+                  " with something else");
+    }
+    // Add signatures to all found RRsets
+    for (std::map<RRType, RRsetPtr>::iterator i(result.begin());
+         i != result.end(); ++ i) {
+        sig_store.appendSignatures(i->second);
+    }
+
+    return (FoundRRsets(records_found, result));
+}
+
+// Returns true when at least one record exists for the given name in a
+// search that includes its subdomains, i.e. the name exists at least as
+// an empty non-terminal.  Callers use this to tell NXDOMAIN apart from
+// NXRRSET.
+//
+// NOTE(review): the third argument to getRecords() presumably switches
+// the accessor into a subdomain search mode -- confirm against the
+// DatabaseAccessor interface documentation.
+bool
+DatabaseClient::Finder::hasSubdomains(const std::string& name) {
+    // Request the context
+    DatabaseAccessor::IteratorContextPtr
+        context(accessor_->getRecords(name, zone_id_, true));
+    // It must not return NULL, that's a bug of the implementation
+    if (!context) {
+        isc_throw(isc::Unexpected, "Iterator context null at " + name);
+    }
+
+    std::string columns[DatabaseAccessor::COLUMN_COUNT];
+    return (context->getNext(columns));
+}
+
+// Some manipulation with RRType sets
+namespace {
+
+// Bunch of functions to construct specific sets of RRTypes we will
+// ask from it.
+//
+// Each function returns a lazily-built, function-local singleton set.
+// NOTE(review): the first-call initialization is not synchronized;
+// presumably these are only reached from a single thread -- confirm.
+typedef std::set<RRType> WantedTypes;
+
+// Types requested when looking for an NSEC proof: NSEC only.
+const WantedTypes&
+NSEC_TYPES() {
+    static bool initialized(false);
+    static WantedTypes result;
+
+    if (!initialized) {
+        result.insert(RRType::NSEC());
+        initialized = true;
+    }
+    return (result);
+}
+
+// Types that mark a zone cut or redirection: NS and DNAME.
+const WantedTypes&
+DELEGATION_TYPES() {
+    static bool initialized(false);
+    static WantedTypes result;
+
+    if (!initialized) {
+        result.insert(RRType::DNAME());
+        result.insert(RRType::NS());
+        initialized = true;
+    }
+    return (result);
+}
+
+// Types relevant at the final query node: CNAME and NS can override the
+// answer and NSEC may be needed for DNSSEC proofs.  The queried type
+// itself is added to a copy of this set by the caller (find()).
+const WantedTypes&
+FINAL_TYPES() {
+    static bool initialized(false);
+    static WantedTypes result;
+
+    if (!initialized) {
+        result.insert(RRType::CNAME());
+        result.insert(RRType::NS());
+        result.insert(RRType::NSEC());
+        initialized = true;
+    }
+    return (result);
+}
+
+}
+
+// Look up the NSEC RRset that "covers" the given (nonexistent) name,
+// i.e. the NSEC owned by the previous existing name in the zone as
+// reported by findPreviousName().
+//
+// Returns a null RRsetPtr when the accessor does not implement
+// findPreviousName() (NotImplemented is caught and logged) or when the
+// previous name carries no NSEC record (e.g. an unsigned zone; see the
+// FIXME below).
+RRsetPtr
+DatabaseClient::Finder::findNSECCover(const Name& name) {
+    try {
+        // Which one should contain the NSEC record?
+        const Name coverName(findPreviousName(name));
+        // Get the record and copy it out
+        const FoundRRsets found = getRRsets(coverName.toText(), NSEC_TYPES(),
+                                            coverName != getOrigin());
+        const FoundIterator
+            nci(found.second.find(RRType::NSEC()));
+        if (nci != found.second.end()) {
+            return (nci->second);
+        } else {
+            // The previous doesn't contain NSEC.
+            // Badly signed zone or a bug?
+
+            // FIXME: Currently, if the zone is not signed, we could get
+            // here. In that case we can't really throw, but for now, we can't
+            // recognize it. So we don't throw at all, enable it once
+            // we have a is_signed flag or something.
+#if 0
+            isc_throw(DataSourceError, "No NSEC in " +
+                      coverName.toText() + ", but it was "
+                      "returned as previous - "
+                      "accessor error? Badly signed zone?");
+#endif
+        }
+    }
+    catch (const isc::NotImplemented&) {
+        // Well, they want DNSSEC, but there is no available.
+        // So we don't provide anything.
+        LOG_INFO(logger, DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED).
+            arg(accessor_->getDBName()).arg(name);
+    }
+    // We didn't find it, return nothing
+    return (RRsetPtr());
+}
+
+ZoneFinder::FindResult
+DatabaseClient::Finder::find(const isc::dns::Name& name,
+                             const isc::dns::RRType& type,
+                             isc::dns::RRsetList*,
+                             const FindOptions options)
+{
+    // This variable is used to determine the difference between
+    // NXDOMAIN and NXRRSET
+    bool records_found = false;
+    bool glue_ok((options & FIND_GLUE_OK) != 0);
+    const bool dnssec_data((options & FIND_DNSSEC) != 0);
+    bool get_cover(false);
+    isc::dns::RRsetPtr result_rrset;
+    ZoneFinder::Result result_status = SUCCESS;
+    FoundRRsets found;
+    logger.debug(DBG_TRACE_DETAILED, DATASRC_DATABASE_FIND_RECORDS)
+        .arg(accessor_->getDBName()).arg(name).arg(type);
+    // In case we are in GLUE_OK mode and start matching wildcards,
+    // we can't do it under NS, so we store it here to check
+    isc::dns::RRsetPtr first_ns;
+
+    // First, do we have any kind of delegation (NS/DNAME) here?
+    const Name origin(getOrigin());
+    const size_t origin_label_count(origin.getLabelCount());
+    // Number of labels in the last known non-empty domain
+    size_t last_known(origin_label_count);
+    const size_t current_label_count(name.getLabelCount());
+    // This is how many labels we remove to get origin
+    const size_t remove_labels(current_label_count - origin_label_count);
+
+    // Now go trough all superdomains from origin down
+    for (int i(remove_labels); i > 0; --i) {
+        Name superdomain(name.split(i));
+        // Look if there's NS or DNAME (but ignore the NS in origin)
+        found = getRRsets(superdomain.toText(), DELEGATION_TYPES(),
+                          i != remove_labels);
+        if (found.first) {
+            // It contains some RRs, so it exists.
+            last_known = superdomain.getLabelCount();
+
+            const FoundIterator nsi(found.second.find(RRType::NS()));
+            const FoundIterator dni(found.second.find(RRType::DNAME()));
+            // In case we are in GLUE_OK mode, we want to store the
+            // highest encountered NS (but not apex)
+            if (glue_ok && !first_ns && i != remove_labels &&
+                nsi != found.second.end()) {
+                first_ns = nsi->second;
+            } else if (!glue_ok && i != remove_labels &&
+                       nsi != found.second.end()) {
+                // Do a NS delegation, but ignore NS in glue_ok mode. Ignore
+                // delegation in apex
+                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                          DATASRC_DATABASE_FOUND_DELEGATION).
+                    arg(accessor_->getDBName()).arg(superdomain);
+                result_rrset = nsi->second;
+                result_status = DELEGATION;
+                // No need to go lower, found
+                break;
+            } else if (dni != found.second.end()) {
+                // Very similar with DNAME
+                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                          DATASRC_DATABASE_FOUND_DNAME).
+                    arg(accessor_->getDBName()).arg(superdomain);
+                result_rrset = dni->second;
+                result_status = DNAME;
+                if (result_rrset->getRdataCount() != 1) {
+                    isc_throw(DataSourceError, "DNAME at " << superdomain <<
+                              " has " << result_rrset->getRdataCount() <<
+                              " rdata, 1 expected");
+                }
+                break;
+            }
+        }
+    }
+
+    if (!result_rrset) { // Only if we didn't find a redirect already
+        // Try getting the final result and extract it
+        // It is special if there's a CNAME or NS, DNAME is ignored here
+        // And we don't consider the NS in origin
+
+        WantedTypes final_types(FINAL_TYPES());
+        final_types.insert(type);
+        found = getRRsets(name.toText(), final_types, name != origin);
+        records_found = found.first;
+
+        // NS records, CNAME record and Wanted Type records
+        const FoundIterator nsi(found.second.find(RRType::NS()));
+        const FoundIterator cni(found.second.find(RRType::CNAME()));
+        const FoundIterator wti(found.second.find(type));
+        if (name != origin && !glue_ok && nsi != found.second.end()) {
+            // There's a delegation at the exact node.
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_FOUND_DELEGATION_EXACT).
+                arg(accessor_->getDBName()).arg(name);
+            result_status = DELEGATION;
+            result_rrset = nsi->second;
+        } else if (type != isc::dns::RRType::CNAME() &&
+                   cni != found.second.end()) {
+            // A CNAME here
+            result_status = CNAME;
+            result_rrset = cni->second;
+            if (result_rrset->getRdataCount() != 1) {
+                isc_throw(DataSourceError, "CNAME with " <<
+                          result_rrset->getRdataCount() <<
+                          " rdata at " << name << ", expected 1");
+            }
+        } else if (wti != found.second.end()) {
+            // Just get the answer
+            result_rrset = wti->second;
+        } else if (!records_found) {
+            // Nothing lives here.
+            // But check if something lives below this
+            // domain and if so, pretend something is here as well.
+            if (hasSubdomains(name.toText())) {
+                LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                          DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL).
+                    arg(accessor_->getDBName()).arg(name);
+                records_found = true;
+                get_cover = dnssec_data;
+            } else if ((options & NO_WILDCARD) != 0) {
+                // If wildcard check is disabled, the search will ultimately
+                // terminate with NXDOMAIN. If DNSSEC is enabled, flag that
+                // we need to get the NSEC records to prove this.
+                if (dnssec_data) {
+                    get_cover = true;
+                }
+            } else {
+                // It's not empty non-terminal. So check for wildcards.
+                // We remove labels one by one and look for the wildcard there.
+                // Go up to first non-empty domain.
+                for (size_t i(1); i + last_known <= current_label_count; ++i) {
+                    // Construct the name with *
+                    const Name superdomain(name.split(i));
+                    const string wildcard("*." + superdomain.toText());
+                    const string construct_name(name.toText());
+                    // TODO What do we do about DNAME here?
+                    // The types are the same as with original query
+                    found = getRRsets(wildcard, final_types, true,
+                                      &construct_name);
+                    if (found.first) {
+                        if (first_ns) {
+                            // In case we are under NS, we don't
+                            // wildcard-match, but return delegation
+                            result_rrset = first_ns;
+                            result_status = DELEGATION;
+                            records_found = true;
+                            // We pretend to switch to non-glue_ok mode
+                            glue_ok = false;
+                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                                      DATASRC_DATABASE_WILDCARD_CANCEL_NS).
+                                arg(accessor_->getDBName()).arg(wildcard).
+                                arg(first_ns->getName());
+                        } else if (!hasSubdomains(name.split(i - 1).toText()))
+                        {
+                            // Nothing we added as part of the * can exist
+                            // directly, as we go up only to first existing
+                            // domain, but it could be empty non-terminal. In
+                            // that case, we need to cancel the match.
+                            records_found = true;
+                            const FoundIterator
+                                cni(found.second.find(RRType::CNAME()));
+                            const FoundIterator
+                                nsi(found.second.find(RRType::NS()));
+                            const FoundIterator
+                                nci(found.second.find(RRType::NSEC()));
+                            const FoundIterator wti(found.second.find(type));
+                            if (cni != found.second.end() &&
+                                type != RRType::CNAME()) {
+                                result_rrset = cni->second;
+                                result_status = WILDCARD_CNAME;
+                            } else if (nsi != found.second.end()) {
+                                result_rrset = nsi->second;
+                                result_status = DELEGATION;
+                            } else if (wti != found.second.end()) {
+                                result_rrset = wti->second;
+                                result_status = WILDCARD;
+                            } else {
+                                // NXRRSET case in the wildcard
+                                result_status = WILDCARD_NXRRSET;
+                                if (dnssec_data &&
+                                    nci != found.second.end()) {
+                                    // User wants a proof the wildcard doesn't
+                                    // contain it
+                                    //
+                                    // However, we need to get the RRset in the
+                                    // name of the wildcard, not the constructed
+                                    // one, so we walk it again
+                                    found = getRRsets(wildcard, NSEC_TYPES(),
+                                                      true);
+                                    result_rrset =
+                                        found.second.find(RRType::NSEC())->
+                                        second;
+                                }
+                            }
+
+                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                                      DATASRC_DATABASE_WILDCARD).
+                                arg(accessor_->getDBName()).arg(wildcard).
+                                arg(name);
+                        } else {
+                            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                                      DATASRC_DATABASE_WILDCARD_CANCEL_SUB).
+                                arg(accessor_->getDBName()).arg(wildcard).
+                                arg(name).arg(superdomain);
+                        }
+                        break;
+                    } else if (hasSubdomains(wildcard)) {
+                        // Empty non-terminal asterisk
+                        records_found = true;
+                        LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                                  DATASRC_DATABASE_WILDCARD_EMPTY).
+                            arg(accessor_->getDBName()).arg(wildcard).
+                            arg(name);
+                        if (dnssec_data) {
+                            result_rrset = findNSECCover(Name(wildcard));
+                            if (result_rrset) {
+                                result_status = WILDCARD_NXRRSET;
+                            }
+                        }
+                        break;
+                    }
+                }
+                // This is the NXDOMAIN case (nothing found anywhere). If
+                // they want DNSSEC data, try getting the NSEC record
+                if (dnssec_data && !records_found) {
+                    get_cover = true;
+                }
+            }
+        } else if (dnssec_data) {
+            // This is the "usual" NXRRSET case
+            // So in case they want DNSSEC, provide the NSEC
+            // (which should be available already here)
+            result_status = NXRRSET;
+            const FoundIterator nci(found.second.find(RRType::NSEC()));
+            if (nci != found.second.end()) {
+                result_rrset = nci->second;
+            }
+        }
+    }
+
+    if (!result_rrset) {
+        if (result_status == SUCCESS) {
+            // Should we look for NSEC covering the name?
+            if (get_cover) {
+                result_rrset = findNSECCover(name);
+                if (result_rrset) {
+                    result_status = NXDOMAIN;
+                }
+            }
+            // Something is not here and we didn't decide yet what
+            if (records_found) {
+                logger.debug(DBG_TRACE_DETAILED,
+                             DATASRC_DATABASE_FOUND_NXRRSET)
+                    .arg(accessor_->getDBName()).arg(name)
+                    .arg(getClass()).arg(type);
+                result_status = NXRRSET;
+            } else {
+                logger.debug(DBG_TRACE_DETAILED,
+                             DATASRC_DATABASE_FOUND_NXDOMAIN)
+                    .arg(accessor_->getDBName()).arg(name)
+                    .arg(getClass()).arg(type);
+                result_status = NXDOMAIN;
+            }
+        }
+    } else {
+        logger.debug(DBG_TRACE_DETAILED,
+                     DATASRC_DATABASE_FOUND_RRSET)
+                    .arg(accessor_->getDBName()).arg(*result_rrset);
+    }
+    return (FindResult(result_status, result_rrset));
+}
+
+Name
+DatabaseClient::Finder::findPreviousName(const Name& name) const {
+    // Ask the accessor for the previous name in DNSSEC order.  The
+    // accessor-level API works on reversed textual names (labels in
+    // inverted order), which is how the backend stores them for ordered
+    // lookups, so reverse before passing the name down.
+    const string str(accessor_->findPreviousName(zone_id_,
+                                                 name.reverse().toText()));
+    try {
+        return (Name(str));
+    }
+    /*
+     * To avoid having the same code many times, we just catch all the
+     * exceptions and handle them in a common code below
+     */
+    catch (const isc::dns::EmptyLabel&) {}
+    catch (const isc::dns::TooLongLabel&) {}
+    catch (const isc::dns::BadLabelType&) {}
+    catch (const isc::dns::BadEscape&) {}
+    catch (const isc::dns::TooLongName&) {}
+    catch (const isc::dns::IncompleteName&) {}
+    // Any parse failure above means the database handed back a string that
+    // is not a valid domain name; report it as broken data source content,
+    // including the offending string for diagnosis.
+    isc_throw(DataSourceError, "Bad name " + str + " from findPreviousName");
+}
+
+Name
+DatabaseClient::Finder::getOrigin() const {
+    // Simple accessor: the origin (zone apex name) is fixed at
+    // construction time of the Finder.
+    return (origin_);
+}
+
+isc::dns::RRClass
+DatabaseClient::Finder::getClass() const {
+    // TODO: obtain the class from the database backend once the schema
+    // can store it; for now every zone is assumed to be class IN.
+    // (Parenthesized return matches the convention used throughout
+    // this file.)
+    return (isc::dns::RRClass::IN());
+}
+
+namespace {
+
+/*
+ * Besides converting all data from textual representation, this iterator
+ * groups together rdata of the same RRset. To do this, we hold one row of
+ * data ahead of iteration. When we get a request to provide data, we create it from this
+ * data and load a new one. If it is to be put to the same rrset, we add it.
+ * Otherwise we just return what we have and keep the row as the one ahead
+ * for next time.
+ */
+// Iterates over all records of one zone, grouping consecutive rows with
+// the same owner name and type into RRsets (unless separate_rrs is set).
+// It holds one row of look-ahead data at all times; see the comment block
+// above for the grouping strategy.
+class DatabaseIterator : public ZoneIterator {
+public:
+    // The accessor passed in is expected to be a dedicated clone, so the
+    // iterator can run its own transaction independently of the client.
+    DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
+                     const Name& zone_name,
+                     const RRClass& rrclass,
+                     bool separate_rrs) :
+        accessor_(accessor),
+        class_(rrclass),
+        ready_(true),
+        separate_rrs_(separate_rrs)
+    {
+        // Get the zone
+        const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
+        if (!zone.first) {
+            // No such zone, can't continue
+            isc_throw(DataSourceError, "Zone " + zone_name.toText() +
+                      " can not be iterated, because it doesn't exist "
+                      "in this data source");
+        }
+
+        // Start a separate transaction.
+        accessor_->startTransaction();
+
+        // Find the SOA of the zone (may or may not succeed).  Note that
+        // this must be done before starting the iteration context.
+        soa_ = DatabaseClient::Finder(accessor_, zone.second, zone_name).
+            find(zone_name, RRType::SOA(), NULL).rrset;
+
+        // Request the context
+        context_ = accessor_->getAllRecords(zone.second);
+        // It must not return NULL, that's a bug of the implementation
+        if (!context_) {
+            isc_throw(isc::Unexpected, "Iterator context null at " +
+                      zone_name.toText());
+        }
+
+        // Prepare data for the next time
+        getData();
+    }
+
+    // If iteration was abandoned before reaching the end, close the
+    // transaction that the constructor opened.
+    virtual ~DatabaseIterator() {
+        if (ready_) {
+            accessor_->commit();
+        }
+    }
+
+    virtual ConstRRsetPtr getSOA() const {
+        return (soa_);
+    }
+
+    // Returns the next RRset of the zone, or a NULL pointer at the end.
+    // Throws isc::Unexpected when called again after the end was reached.
+    virtual isc::dns::ConstRRsetPtr getNextRRset() {
+        if (!ready_) {
+            isc_throw(isc::Unexpected, "Iterating past the zone end");
+        }
+        if (!data_ready_) {
+            // At the end of zone
+            accessor_->commit();
+            ready_ = false;
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_ITERATE_END);
+            return (ConstRRsetPtr());
+        }
+        // Snapshot the look-ahead row; getData() below will overwrite the
+        // member variables with the following row.
+        const string name_str(name_), rtype_str(rtype_), ttl(ttl_);
+        const Name name(name_str);
+        const RRType rtype(rtype_str);
+        RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
+        // Accumulate rows as long as owner name and type stay the same.
+        while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
+            if (ttl_ != ttl) {
+                // NOTE(review): this compares the TTLs as strings, so the
+                // ordering is lexicographic (e.g. "9" > "10") — verify
+                // whether a numeric comparison was intended.
+                if (ttl < ttl_) {
+                    ttl_ = ttl;
+                    rrset->setTTL(RRTTL(ttl));
+                }
+                LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+                    arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+            }
+            rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
+            getData();
+            // In separate-RRs mode each row becomes its own RRset.
+            if (separate_rrs_) {
+                break;
+            }
+        }
+        LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
+            arg(rrset->getName()).arg(rrset->getType());
+        return (rrset);
+    }
+
+private:
+    // Load next row of data
+    void getData() {
+        string data[DatabaseAccessor::COLUMN_COUNT];
+        data_ready_ = context_->getNext(data);
+        name_ = data[DatabaseAccessor::NAME_COLUMN];
+        rtype_ = data[DatabaseAccessor::TYPE_COLUMN];
+        ttl_ = data[DatabaseAccessor::TTL_COLUMN];
+        rdata_ = data[DatabaseAccessor::RDATA_COLUMN];
+    }
+
+    // The dedicated accessor
+    shared_ptr<DatabaseAccessor> accessor_;
+    // The context
+    DatabaseAccessor::IteratorContextPtr context_;
+    // Class of the zone
+    const RRClass class_;
+    // SOA of the zone, if any (it should normally exist)
+    ConstRRsetPtr soa_;
+    // Status
+    bool ready_, data_ready_;
+    // Data of the next row
+    string name_, rtype_, rdata_, ttl_;
+    // Whether to modify differing TTL values, or treat a different TTL as
+    // a different RRset
+    bool separate_rrs_;
+};
+
+}
+
+ZoneIteratorPtr
+DatabaseClient::getIterator(const isc::dns::Name& name,
+                            bool separate_rrs) const
+{
+    // Hand the iterator its own cloned accessor so it can run a
+    // transaction of its own, independent of this client instance.
+    const ZoneIteratorPtr result(new DatabaseIterator(accessor_->clone(),
+                                                      name, rrclass_,
+                                                      separate_rrs));
+    LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
+        arg(name);
+    return (result);
+}
+
+//
+// Zone updater using some database system as the underlying data source.
+//
+class DatabaseUpdater : public ZoneUpdater {
+public:
+    // The accessor is expected to already have an update transaction
+    // started for zone_id; when journaling is true, each add/delete is
+    // also recorded as a diff entry.
+    DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
+            const Name& zone_name, const RRClass& zone_class,
+            bool journaling) :
+        committed_(false), accessor_(accessor), zone_id_(zone_id),
+        db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
+        zone_class_(zone_class), journaling_(journaling),
+        diff_phase_(NOT_STARTED), serial_(0),
+        finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
+    {
+        logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
+            .arg(zone_name_).arg(zone_class_).arg(db_name_);
+    }
+
+    // Rolls back the transaction if commit() was never (successfully)
+    // called, so an abandoned updater leaves the zone untouched.
+    virtual ~DatabaseUpdater() {
+        if (!committed_) {
+            try {
+                accessor_->rollback();
+                logger.info(DATASRC_DATABASE_UPDATER_ROLLBACK)
+                    .arg(zone_name_).arg(zone_class_).arg(db_name_);
+            } catch (const DataSourceError& e) {
+                // We generally expect that rollback always succeeds, and
+                // it should in fact succeed in a way we execute it.  But
+                // as the public API allows rollback() to fail and
+                // throw, we should expect it.  Obviously we cannot re-throw
+                // it.  The best we can do is to log it as a critical error.
+                logger.error(DATASRC_DATABASE_UPDATER_ROLLBACKFAIL)
+                    .arg(zone_name_).arg(zone_class_).arg(db_name_)
+                    .arg(e.what());
+            }
+        }
+
+        logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_DESTROYED)
+            .arg(zone_name_).arg(zone_class_).arg(db_name_);
+    }
+
+    virtual ZoneFinder& getFinder() { return (*finder_); }
+
+    virtual void addRRset(const RRset& rrset);
+    virtual void deleteRRset(const RRset& rrset);
+    virtual void commit();
+
+private:
+    // A short cut typedef only for making the code shorter.
+    typedef DatabaseAccessor Accessor;
+
+    // Whether commit() completed; guards the destructor's rollback.
+    bool committed_;
+    shared_ptr<DatabaseAccessor> accessor_;
+    const int zone_id_;
+    const string db_name_;
+    const string zone_name_;
+    const RRClass zone_class_;
+    const bool journaling_;
+    // For the journals
+    enum DiffPhase {
+        NOT_STARTED,
+        DELETE,
+        ADD
+    };
+    // Tracks which phase of the diff sequence (deletes then adds) the
+    // journaled update is currently in; see validateAddOrDelete().
+    DiffPhase diff_phase_;
+    // Serial taken from the most recent SOA seen in the diff sequence;
+    // used to tag journal entries.
+    Serial serial_;
+    boost::scoped_ptr<DatabaseClient::Finder> finder_;
+
+    // This is a set of validation checks commonly used for addRRset() and
+    // deleteRRset to minimize duplicate code logic and to make the main
+    // code concise.
+    void validateAddOrDelete(const char* const op_str, const RRset& rrset,
+                             DiffPhase prev_phase,
+                             DiffPhase current_phase) const;
+};
+
+// Shared validation for addRRset()/deleteRRset().  op_str names the
+// operation for exception messages.  For journaled updates the phases
+// encode the legal diff sequencing: an SOA RR may only arrive when the
+// state is prev_phase (it moves the sequence forward), while a non-SOA RR
+// may only arrive once the state is already current_phase.
+// Throws DataSourceError for misuse of the updater, isc::BadValue for a
+// broken diff sequence.
+void
+DatabaseUpdater::validateAddOrDelete(const char* const op_str,
+                                     const RRset& rrset,
+                                     DiffPhase prev_phase,
+                                     DiffPhase current_phase) const
+{
+    if (committed_) {
+        isc_throw(DataSourceError, op_str << " attempt after commit to zone: "
+                  << zone_name_ << "/" << zone_class_);
+    }
+    // An empty RRset would be a no-op at best; treat it as caller error.
+    if (rrset.getRdataCount() == 0) {
+        isc_throw(DataSourceError, op_str << " attempt with an empty RRset: "
+                  << rrset.getName() << "/" << zone_class_ << "/"
+                  << rrset.getType());
+    }
+    if (rrset.getClass() != zone_class_) {
+        isc_throw(DataSourceError, op_str << " attempt for a different class "
+                  << zone_name_ << "/" << zone_class_ << ": "
+                  << rrset.toText());
+    }
+    // RRSIGs must be passed as their own RRsets, not attached to the
+    // signed set.
+    if (rrset.getRRsig()) {
+        isc_throw(DataSourceError, op_str << " attempt for RRset with RRSIG "
+                  << zone_name_ << "/" << zone_class_ << ": "
+                  << rrset.toText());
+    }
+    if (journaling_) {
+        const RRType rrtype(rrset.getType());
+        if (rrtype == RRType::SOA() && diff_phase_ != prev_phase) {
+            isc_throw(isc::BadValue, op_str << " attempt in an invalid "
+                      << "diff phase: " << diff_phase_ << ", rrset: " <<
+                      rrset.toText());
+        }
+        if (rrtype != RRType::SOA() && diff_phase_ != current_phase) {
+            isc_throw(isc::BadValue, "diff state change by non SOA: "
+                      << rrset.toText());
+        }
+    }
+}
+
+// Adds all RDATA of the given RRset to the zone, one database row per
+// RDATA.  When journaling, each row is also recorded as an "add" diff
+// tagged with the serial of the most recently seen SOA.
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+    validateAddOrDelete("add", rrset, DELETE, ADD);
+
+    // It's guaranteed rrset has at least one RDATA at this point.
+    RdataIteratorPtr it = rrset.getRdataIterator();
+
+    string columns[Accessor::ADD_COLUMN_COUNT]; // initialized with ""
+    columns[Accessor::ADD_NAME] = rrset.getName().toText();
+    columns[Accessor::ADD_REV_NAME] = rrset.getName().reverse().toText();
+    columns[Accessor::ADD_TTL] = rrset.getTTL().toText();
+    columns[Accessor::ADD_TYPE] = rrset.getType().toText();
+    string journal[Accessor::DIFF_PARAM_COUNT];
+    if (journaling_) {
+        journal[Accessor::DIFF_NAME] = columns[Accessor::ADD_NAME];
+        journal[Accessor::DIFF_TYPE] = columns[Accessor::ADD_TYPE];
+        journal[Accessor::DIFF_TTL] = columns[Accessor::ADD_TTL];
+        diff_phase_ = ADD;
+        // An added SOA starts a new diff serial; take it from the first
+        // (still current) RDATA of the iterator.
+        if (rrset.getType() == RRType::SOA()) {
+            serial_ =
+                dynamic_cast<const generic::SOA&>(it->getCurrent()).
+                getSerial();
+        }
+    }
+    for (; !it->isLast(); it->next()) {
+        if (rrset.getType() == RRType::RRSIG()) {
+            // XXX: the current interface (based on the current sqlite3
+            // data source schema) requires a separate "sigtype" column,
+            // even though it won't be used in a newer implementation.
+            // We should eventually clean up the schema design and simplify
+            // the interface, but until then we have to conform to the schema.
+            const generic::RRSIG& rrsig_rdata =
+                dynamic_cast<const generic::RRSIG&>(it->getCurrent());
+            columns[Accessor::ADD_SIGTYPE] =
+                rrsig_rdata.typeCovered().toText();
+        }
+        columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
+        // The diff entry is written before the record itself.
+        if (journaling_) {
+            journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_ADD, journal);
+        }
+        accessor_->addRecordToZone(columns);
+    }
+}
+
+// Deletes all RDATA of the given RRset from the zone, one database
+// operation per RDATA.  When journaling, each deletion is also recorded
+// as a "delete" diff tagged with the serial of the most recent SOA.
+void
+DatabaseUpdater::deleteRRset(const RRset& rrset) {
+    // If this is the first operation, pretend we are starting a new delete
+    // sequence after adds.  This will simplify the validation below.
+    if (diff_phase_ == NOT_STARTED) {
+        diff_phase_ = ADD;
+    }
+
+    validateAddOrDelete("delete", rrset, ADD, DELETE);
+
+    RdataIteratorPtr it = rrset.getRdataIterator();
+
+    string params[Accessor::DEL_PARAM_COUNT]; // initialized with ""
+    params[Accessor::DEL_NAME] = rrset.getName().toText();
+    params[Accessor::DEL_TYPE] = rrset.getType().toText();
+    string journal[Accessor::DIFF_PARAM_COUNT];
+    if (journaling_) {
+        journal[Accessor::DIFF_NAME] = params[Accessor::DEL_NAME];
+        journal[Accessor::DIFF_TYPE] = params[Accessor::DEL_TYPE];
+        journal[Accessor::DIFF_TTL] = rrset.getTTL().toText();
+        diff_phase_ = DELETE;
+        // A deleted SOA begins the diff sequence; remember its serial for
+        // tagging the journal entries.
+        if (rrset.getType() == RRType::SOA()) {
+            serial_ =
+                dynamic_cast<const generic::SOA&>(it->getCurrent()).
+                getSerial();
+        }
+    }
+    for (; !it->isLast(); it->next()) {
+        params[Accessor::DEL_RDATA] = it->getCurrent().toText();
+        // The diff entry is written before the record is removed.
+        if (journaling_) {
+            journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_DELETE, journal);
+        }
+        accessor_->deleteRecordInZone(params);
+    }
+}
+
+// Commits the accumulated changes to the database.  Throws
+// DataSourceError on a second commit and isc::BadValue if a journaled
+// update would be committed in the middle of its delete phase.
+void
+DatabaseUpdater::commit() {
+    if (committed_) {
+        isc_throw(DataSourceError, "Duplicate commit attempt for "
+                  << zone_name_ << "/" << zone_class_ << " on "
+                  << db_name_);
+    }
+    // A journaled sequence must end in the ADD phase; committing while
+    // still in DELETE would record a broken diff.
+    if (journaling_ && diff_phase_ == DELETE) {
+        isc_throw(isc::BadValue, "Update sequence not complete");
+    }
+    accessor_->commit();
+    committed_ = true; // make sure the destructor won't trigger rollback
+
+    // We release the accessor immediately after commit is completed so that
+    // we don't hold the possible internal resource any longer.
+    accessor_.reset();
+
+    logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_COMMIT)
+        .arg(zone_name_).arg(zone_class_).arg(db_name_);
+}
+
+// The updater factory.  Returns a NULL pointer when the zone is not in
+// this data source; otherwise the updater owns a fresh accessor clone
+// with an update transaction already started.
+ZoneUpdaterPtr
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
+                           bool journaling) const
+{
+    // Journaling records incremental diffs, which is meaningless when the
+    // entire zone content is being replaced.
+    if (replace && journaling) {
+        isc_throw(isc::BadValue, "Can't store journal and replace the whole "
+                  "zone at the same time");
+    }
+
+    // Give the updater a dedicated database session.
+    shared_ptr<DatabaseAccessor> cloned_accessor(accessor_->clone());
+    const std::pair<bool, int> zone_info(
+        cloned_accessor->startUpdateZone(name.toText(), replace));
+    if (!zone_info.first) {
+        // No such zone in this data source.
+        return (ZoneUpdaterPtr());
+    }
+    return (ZoneUpdaterPtr(new DatabaseUpdater(cloned_accessor,
+                                               zone_info.second, name,
+                                               rrclass_, journaling)));
+}
+
+//
+// Zone journal reader using some database system as the underlying data
+//  source.
+//
+// Reads diff entries for a zone between two serials, converting each
+// database row into a single-RDATA RRset.
+class DatabaseJournalReader : public ZoneJournalReader {
+private:
+    // A shortcut typedef to keep the code concise.
+    typedef DatabaseAccessor Accessor;
+public:
+    // The accessor is expected to be a dedicated clone; the diff context
+    // for [begin, end] is opened immediately in the constructor.
+    DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+                          int zone_id, const RRClass& rrclass, uint32_t begin,
+                          uint32_t end) :
+        accessor_(accessor), zone_(zone), rrclass_(rrclass),
+        begin_(begin), end_(end), finished_(false)
+    {
+        context_ = accessor_->getDiffs(zone_id, begin, end);
+    }
+    virtual ~DatabaseJournalReader() {}
+    // Returns the next diff RRset, or NULL once the sequence is
+    // exhausted.  Throws InvalidOperation if called after the end,
+    // DataSourceError on malformed diff data.
+    virtual ConstRRsetPtr getNextDiff() {
+        if (finished_) {
+            isc_throw(InvalidOperation,
+                      "Diff read attempt past the end of sequence on "
+                      << accessor_->getDBName());
+        }
+
+        string data[Accessor::COLUMN_COUNT];
+        if (!context_->getNext(data)) {
+            finished_ = true;
+            LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                      DATASRC_DATABASE_JOURNALREADER_END).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_);
+            return (ConstRRsetPtr());
+        }
+
+        try {
+            // Each diff row carries exactly one RDATA.
+            RRsetPtr rrset(new RRset(Name(data[Accessor::NAME_COLUMN]),
+                                     rrclass_,
+                                     RRType(data[Accessor::TYPE_COLUMN]),
+                                     RRTTL(data[Accessor::TTL_COLUMN])));
+            rrset->addRdata(rdata::createRdata(rrset->getType(), rrclass_,
+                                               data[Accessor::RDATA_COLUMN]));
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_JOURNALREADER_NEXT).
+                arg(rrset->getName()).arg(rrset->getType()).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName());
+            return (rrset);
+        } catch (const Exception& ex) {
+            // Any parse failure of the stored text means broken diff data;
+            // log it and report it as a data source level error.
+            LOG_ERROR(logger, DATASRC_DATABASE_JOURNALREADR_BADDATA).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_).arg(ex.what());
+            isc_throw(DataSourceError, "Failed to create RRset from diff on "
+                      << accessor_->getDBName());
+        }
+    }
+
+private:
+    shared_ptr<Accessor> accessor_;
+    const Name zone_;
+    const RRClass rrclass_;
+    Accessor::IteratorContextPtr context_;
+    const uint32_t begin_;
+    const uint32_t end_;
+    bool finished_;
+};
+
+// The JournalReader factory.  Returns (NO_SUCH_ZONE, NULL) when the zone
+// is unknown, (NO_SUCH_VERSION, NULL) when the requested serial range is
+// not covered by the stored diffs, and (SUCCESS, reader) otherwise.
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+DatabaseClient::getJournalReader(const isc::dns::Name& zone,
+                                 uint32_t begin_serial,
+                                 uint32_t end_serial) const
+{
+    // The reader gets its own accessor clone so it can read independently.
+    shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+    const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
+    if (!zoneinfo.first) {
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_ZONE,
+                    ZoneJournalReaderPtr()));
+    }
+
+    try {
+        const pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> ret(
+            ZoneJournalReader::SUCCESS,
+            ZoneJournalReaderPtr(new DatabaseJournalReader(jnl_accessor,
+                                                           zone,
+                                                           zoneinfo.second,
+                                                           rrclass_,
+                                                           begin_serial,
+                                                           end_serial)));
+        LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                  DATASRC_DATABASE_JOURNALREADER_START).arg(zone).arg(rrclass_).
+            arg(jnl_accessor->getDBName()).arg(begin_serial).arg(end_serial);
+        return (ret);
+    } catch (const NoSuchSerial&) {
+        // The reader's constructor opens the diff context, which throws
+        // when the serial range is not available.
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_VERSION,
+                    ZoneJournalReaderPtr()));
+    }
+}
+}
+}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
new file mode 100644
index 0000000..81e6241
--- /dev/null
+++ b/src/lib/datasrc/database.h
@@ -0,0 +1,959 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATABASE_DATASRC_H
+#define __DATABASE_DATASRC_H
+
+#include <string>
+
+#include <boost/scoped_ptr.hpp>
+
+#include <dns/rrclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrset.h>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+#include <exceptions/exceptions.h>
+
+#include <map>
+#include <set>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Abstraction of lowlevel database with DNS data
+ *
+ * This class defines the interface to databases. Each supported database
+ * will provide methods for accessing the data stored there in a generic
+ * manner. The methods are meant to be low-level, without much or any knowledge
+ * about DNS and should be possible to translate directly to queries.
+ *
+ * On the other hand, how the communication with database is done and in what
+ * schema (in case of relational/SQL database) is up to the concrete classes.
+ *
+ * This class is non-copyable, as copying connections to database makes little
+ * sense and will not be needed.
+ *
+ * \todo Is it true this does not need to be copied? For example the zone
+ *     iterator might need its own copy. But a virtual clone() method might
+ *     be better for that than copy constructor.
+ *
+ * \note The same application may create multiple connections to the same
+ *     database, having multiple instances of this class. If the database
+ *     allows having multiple open queries at one connection, the connection
+ *     class may share it.
+ */
+class DatabaseAccessor : boost::noncopyable {
+public:
+    /**
+     * Definitions of the fields as they are required to be filled in
+     * by IteratorContext::getNext()
+     *
+     * When implementing getNext(), the columns array should
+     * be filled with the values as described in this enumeration,
+     * in this order, i.e. TYPE_COLUMN should be the first element
+     * (index 0) of the array, TTL_COLUMN should be the second element
+     * (index 1), etc.
+     */
+    enum RecordColumns {
+        TYPE_COLUMN = 0,    ///< The RRType of the record (A/NS/TXT etc.)
+        TTL_COLUMN = 1,     ///< The TTL of the record (in numeric form)
+        SIGTYPE_COLUMN = 2, ///< For RRSIG records, this contains the RRTYPE
+                            ///< the RRSIG covers. In the current implementation,
+                            ///< this field is ignored.
+        RDATA_COLUMN = 3,   ///< Full text representation of the record's RDATA
+        NAME_COLUMN = 4,    ///< The domain name of this RR
+        COLUMN_COUNT = 5    ///< The total number of columns, MUST be value of
+                            ///< the largest other element in this enum plus 1.
+    };
+
+    /**
+     * Definitions of the fields to be passed to addRecordToZone().
+     *
+     * Each derived implementation of addRecordToZone() should expect
+     * the "columns" array to be filled with the values as described in this
+     * enumeration, in this order.
+     */
+    enum AddRecordColumns {
+        ADD_NAME = 0, ///< The owner name of the record (a domain name)
+        ADD_REV_NAME = 1, ///< Reversed name of NAME (used for DNSSEC)
+        ADD_TTL = 2,     ///< The TTL of the record (in numeric form)
+        ADD_TYPE = 3,    ///< The RRType of the record (A/NS/TXT etc.)
+        ADD_SIGTYPE = 4, ///< For RRSIG records, this contains the RRTYPE
+                            ///< the RRSIG covers.
+        ADD_RDATA = 5,    ///< Full text representation of the record's RDATA
+        ADD_COLUMN_COUNT = 6 ///< Number of columns
+    };
+
+    /**
+     * Definitions of the fields to be passed to deleteRecordInZone().
+     *
+     * Each derived implementation of deleteRecordInZone() should expect
+     * the "params" array to be filled with the values as described in this
+     * enumeration, in this order.
+     */
+    enum DeleteRecordParams {
+        DEL_NAME = 0, ///< The owner name of the record (a domain name)
+        DEL_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+        DEL_RDATA = 2, ///< Full text representation of the record's RDATA
+        DEL_PARAM_COUNT = 3 ///< Number of parameters
+    };
+
+    /**
+     * Operation mode when adding a record diff.
+     *
+     * This is used as the "operation" parameter value of addRecordDiff().
+     */
+    enum DiffOperation {
+        DIFF_ADD = 0,           ///< This diff is for adding an RR
+        DIFF_DELETE = 1         ///< This diff is for deleting an RR
+    };
+
+    /**
+     * Definitions of the fields to be passed to addRecordDiff().
+     *
+     * Each derived implementation of addRecordDiff() should expect
+     * the "params" array to be filled with the values as described in this
+     * enumeration, in this order.
+     */
+    enum DiffRecordParams {
+        DIFF_NAME = 0, ///< The owner name of the record (a domain name)
+        DIFF_TYPE = 1, ///< The RRType of the record (A/NS/TXT etc.)
+        DIFF_TTL = 2,  ///< The TTL of the record (in numeric form)
+        DIFF_RDATA = 3, ///< Full text representation of the record's RDATA
+        DIFF_PARAM_COUNT = 4    ///< Number of parameters
+    };
+
+    /**
+     * \brief Destructor
+     *
+     * It is empty, but needs a virtual one, since we will use the derived
+     * classes in polymorphic way.
+     */
+    virtual ~DatabaseAccessor() { }
+
+    /**
+     * \brief Retrieve a zone identifier
+     *
+     * This method looks up a zone for the given name in the database. It
+     * should match only exact zone name (eg. name is equal to the zone's
+     * apex), as the DatabaseClient will loop through the labels itself and
+     * find the most suitable zone.
+     *
+     * It is not specified if and what implementation of this method may throw,
+     * so code should expect anything.
+     *
+     * \param name The (fully qualified) domain name of the zone's apex to be
+     *             looked up.
+     * \return The first part of the result indicates if a matching zone
+     *     was found. In case it was, the second part is internal zone ID.
+     *     This one will be passed to methods finding data in the zone.
+     *     It is not required to keep them, in which case whatever might
+     *     be returned - the ID is only passed back to the database as
+     *     an opaque handle.
+     */
+    virtual std::pair<bool, int> getZone(const std::string& name) const = 0;
+
+    /**
+     * \brief This holds the internal context of ZoneIterator for databases
+     *
+     * While the ZoneIterator implementation from DatabaseClient does all the
+     * translation from strings to DNS classes and validation, this class
+     * holds the pointer to where the database is at reading the data.
+     *
+     * It can either hold shared pointer to the connection which created it
+     * and have some kind of statement inside (in case single database
+     * connection can handle multiple concurrent SQL statements) or it can
+     * create a new connection (or, if it is more convenient, the connection
+     * itself can inherit both from DatabaseConnection and IteratorContext
+     * and just clone itself).
+     */
+    class IteratorContext : public boost::noncopyable {
+    public:
+        /**
+         * \brief Destructor
+         *
+         * Virtual destructor, so any descendant class is destroyed correctly.
+         */
+        virtual ~IteratorContext() { }
+
+        /**
+         * \brief Function to provide next resource record
+         *
+         * This function should provide data about the next resource record
+         * from the data that is searched. The data is not converted yet.
+         *
+         * Depending on how the iterator was constructed, there is a difference
+         * in behaviour; for a 'full zone iterator', created with
+         * getAllRecords(), all COLUMN_COUNT elements of the array are
+         * overwritten.
+         * For a 'name iterator', created with getRecords(), the column
+         * NAME_COLUMN is untouched, since what would be added here is by
+         * definition already known to the caller (it already passes it as
+         * an argument to getRecords()).
+         *
+         * Once this function returns false, any subsequent call to it should
+         * result in false.  The implementation of a derived class must ensure
+         * it doesn't cause any disruption due to that such as a crash or
+         * exception.
+         *
+         * \note The order of RRs is not strictly set, but the RRs for single
+         * RRset must not be interleaved with any other RRs (eg. RRsets must be
+         * "together").
+         *
+         * \param columns The data will be returned through here. The order
+         *     is specified by the RecordColumns enum, and the size must be
+         *     COLUMN_COUNT
+         * \todo Do we consider databases where it is stored in binary blob
+         *     format?
+         * \throw DataSourceError if there's database-related error. If the
+         *     exception (or any other in case of derived class) is thrown,
+         *     the iterator can't be safely used any more.
+         * \return true if a record was found, and the columns array was
+         *         updated. false if there was no more data, in which case
+         *         the columns array is untouched.
+         */
+        virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) = 0;
+    };
+
+    typedef boost::shared_ptr<IteratorContext> IteratorContextPtr;
+
+    /**
+     * \brief Creates an iterator context for a specific name.
+     *
+     * Returns an IteratorContextPtr that contains all records of the
+     * given name from the given zone.
+     *
+     * The implementation of the iterator that is returned may leave the
+     * NAME_COLUMN column of the array passed to getNext() untouched, as that
+     * data is already known (it is the same as the name argument here)
+     *
+     * \exception any Since any implementation can be used, the caller should
+     *            expect any exception to be thrown.
+     *
+     * \param name The name to search for. This should be a FQDN.
+     * \param id The ID of the zone, returned from getZone().
+     * \param subdomains If set to true, match subdomains of name instead
+     *     of name itself. It is used to find empty domains and match
+     *     wildcards.
+     * \return Newly created iterator context. Must not be NULL.
+     */
+    virtual IteratorContextPtr getRecords(const std::string& name,
+                                          int id,
+                                          bool subdomains = false) const = 0;
+
+    /**
+     * \brief Creates an iterator context for the whole zone.
+     *
+     * Returns an IteratorContextPtr that contains all records of the
+     * zone with the given zone id.
+     *
+     * Each call to getNext() on the returned iterator should copy all
+     * column fields of the array that is passed, as defined in the
+     * RecordColumns enum.
+     *
+     * \exception any Since any implementation can be used, the caller should
+     *            expect any exception to be thrown.
+     *
+     * \param id The ID of the zone, returned from getZone().
+     * \return Newly created iterator context. Must not be NULL.
+     */
+    virtual IteratorContextPtr getAllRecords(int id) const = 0;
+
+    /**
+     * \brief Creates an iterator context for a set of differences.
+     *
+     * Returns an IteratorContextPtr that contains all difference records for
+     * the given zone between two versions of a zone.
+     *
+     * The difference records are the set of records that would appear in an
+     * IXFR serving a request for the difference between two versions of a zone.
+     * The records are returned in the same order as they would be in the IXFR.
+     * This means that if the difference between versions of a zone with SOA
+     * serial numbers of "start" and "end" is required, and the zone contains
+     * the differences between serial number "start" to serial number
+     * "intermediate" and from serial number "intermediate" to serial number
+     * "end", the returned records will be (in order):
+     *
+     * \li SOA for serial "start"
+     * \li Records removed from the zone between versions "start" and
+     *     "intermediate" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "intermediate"
+     * \li Records added to the zone between versions "start" and
+     *     "intermediate" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "intermediate"
+     * \li Records removed from the zone between versions "intermediate" and
+     *     "end" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "end"
+     * \li Records added to the zone between versions "intermediate" and "end"
+     *     of the zone. The order of these is not guaranteed.
+     *
+     * Note that there is no requirement that "start" be less than "end". Owing
+     * to serial number arithmetic, it is entirely possible that a later version
+     * of a zone will have a smaller SOA serial number than an earlier version.
+     *
+     * Each call to getNext() on the returned iterator should copy all
+     * column fields of the array that is passed, as defined in the
+     * RecordColumns enum.
+     *
+     * \exception any Since any implementation can be used, the caller should
+     *                expect any exception to be thrown.
+     *
+     * \param id The ID of the zone, returned from getZone().
+     * \param start The SOA serial number of the version of the zone from
+     *        which the difference sequence should start.
+     * \param end The SOA serial number of the version of the zone at which
+     *        the difference sequence should end.
+     *
+     * \return Newly created iterator context. Must not be NULL.
+     */
+    virtual IteratorContextPtr
+    getDiffs(int id, uint32_t start, uint32_t end) const = 0;
+
+    /// Start a transaction for updating a zone.
+    ///
+    /// Each derived class version of this method starts a database
+    /// transaction to make updates to the given name of zone (whose class was
+    /// specified at the construction of the class).
+    ///
+    /// If \c replace is true, any existing records of the zone will be
+    /// deleted on successful completion of updates (after
+    /// \c commitUpdateZone()); if it's false, the existing records will be
+    /// intact unless explicitly deleted by \c deleteRecordInZone().
+    ///
+    /// A single \c DatabaseAccessor instance can perform at most one
+    /// transaction; a duplicate call to this method before
+    /// \c commitUpdateZone() or \c rollbackUpdateZone(), or a call to this
+    /// method within another transaction started by \c startTransaction()
+    /// will result in a \c DataSourceError exception.
+    /// If multiple update attempts need to be performed concurrently (and
+    /// if the underlying database allows such operation), separate
+    /// \c DatabaseAccessor instance must be created.
+    ///
+    /// \note The underlying database may not allow concurrent updates to
+    /// the same database instance even if different "connections" (or
+    /// something similar specific to the database implementation) are used
+    /// for different sets of updates.  For example, it doesn't seem to be
+    /// possible for SQLite3 unless different databases are used.  MySQL
+    /// allows concurrent updates to different tables of the same database,
+    /// but a specific operation may block others.  As such, this interface
+    /// doesn't require derived classes to allow concurrent updates with
+    /// multiple \c DatabaseAccessor instances; however, the implementation
+    /// is encouraged to do the best for making it more likely to succeed
+    /// as long as the underlying database system allows concurrent updates.
+    ///
+    /// This method returns a pair of \c bool and \c int.  Its first element
+    /// indicates whether the given name of zone is found.  If it's false,
+    /// the transaction isn't considered to be started; a subsequent call to
+    /// this method with an existing zone name should succeed.  Likewise,
+    /// if a call to this method results in an exception, the transaction
+    /// isn't considered to be started.  Note also that if the zone is not
+    /// found this method doesn't try to create a new one in the database.
+    /// It must have been created by some other means beforehand.
+    ///
+    /// The second element is the internal zone ID used for subsequent
+    /// updates.  Depending on implementation details of the actual derived
+    /// class method, it may be different from the one returned by
+    /// \c getZone(); for example, a specific implementation may use a
+    /// completely new zone ID when \c replace is true.
+    ///
+    /// \exception DataSourceError Duplicate call to this method, call to
+    /// this method within another transaction, or some internal database
+    /// related error.
+    ///
+    /// \param zone_name A string representation of the zone name to be updated
+    /// \param replace Whether to replace the entire zone (see above)
+    ///
+    /// \return A pair of bool and int, indicating whether the specified zone
+    /// exists and (if so) the zone ID to be used for the update, respectively.
+    virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+                                                 bool replace) = 0;
+
+    /// Add a single record to the zone to be updated.
+    ///
+    /// This method provides a simple interface to insert a new record
+    /// (a database "row") to the zone in the update context started by
+    /// \c startUpdateZone().  The zone to which the record is to be added
+    /// is the one specified at the time of the \c startUpdateZone() call.
+    ///
+    /// A successful call to \c startUpdateZone() must have preceded
+    /// this call; otherwise a \c DataSourceError exception will be thrown.
+    ///
+    /// The row is defined as a vector of strings that has exactly
+    /// ADD_COLUMN_COUNT number of elements.  See AddRecordColumns for
+    /// the semantics of each element.
+    ///
+    /// Derived class methods are not required to check whether the given
+    /// values in \c columns are valid in terms of the expected semantics;
+    /// in general, it's the caller's responsibility.
+    /// For example, TTLs would normally be expected to be a textual
+    /// representation of decimal numbers, but this interface doesn't require
+    /// the implementation to perform this level of validation.  It may check
+    /// the values, however, and in that case if it detects an error it
+    /// should throw a \c DataSourceError exception.
+    ///
+    /// Likewise, derived class methods are not required to detect any
+    /// duplicate record that is already in the zone.
+    ///
+    /// \note The underlying database schema may not have a trivial mapping
+    /// from this style of definition of rows to actual database records.
+    /// It's the implementation's responsibility to implement the mapping
+    /// in the actual derived method.
+    ///
+    /// \exception DataSourceError Invalid call without starting a transaction,
+    /// or other internal database error.
+    ///
+    /// \param columns An array of strings that defines a record to be added
+    /// to the zone.
+    virtual void addRecordToZone(
+        const std::string (&columns)[ADD_COLUMN_COUNT]) = 0;
+
+    /// Delete a single record from the zone to be updated.
+    ///
+    /// This method provides a simple interface to delete a record
+    /// (a database "row") from the zone in the update context started by
+    /// \c startUpdateZone().  The zone from which the record is to be deleted
+    /// is the one specified at the time of the \c startUpdateZone() call.
+    ///
+    /// A successful call to \c startUpdateZone() must have preceded
+    /// this call; otherwise a \c DataSourceError exception will be thrown.
+    ///
+    /// The record to be deleted is specified by a vector of strings that has
+    /// exactly DEL_PARAM_COUNT number of elements.  See DeleteRecordParams
+    /// for the semantics of each element.
+    ///
+    /// \note In IXFR, TTL may also be specified, but we intentionally
+    /// ignore that in this interface, because it's not guaranteed
+    /// that all records have the same TTL (unlike the RRset
+    /// assumption) and there can even be multiple records for the
+    /// same name, type and rdata with different TTLs.  If we only
+    /// delete one of them, subsequent lookup will still return a
+    /// positive answer, which would be confusing.  It's a higher
+    /// layer's responsibility to check if there is at least one
+    /// record in the database that has the given TTL.
+    ///
+    /// Like \c addRecordToZone, derived class methods are not required to
+    /// validate the semantics of the given parameters or to check if there
+    /// is a record that matches the specified parameter; if there isn't
+    /// it simply ignores the result.
+    ///
+    /// \exception DataSourceError Invalid call without starting a transaction,
+    /// or other internal database error.
+    ///
+    /// \param params An array of strings that defines a record to be deleted
+    /// from the zone.
+    virtual void deleteRecordInZone(
+        const std::string (&params)[DEL_PARAM_COUNT]) = 0;
+
+    /// Start a general transaction.
+    ///
+    /// Each derived class version of this method starts a database
+    /// transaction in a way specific to the database details.  Any subsequent
+    /// operations on the accessor are guaranteed to be not susceptible to
+    /// any update attempts made during the transaction.  The transaction
+    /// must be terminated by either \c commit() or \c rollback().
+    ///
+    /// In practice, this transaction is intended to be used to perform
+    /// a set of atomic reads and work as a read-only lock.  So, in many
+    /// cases \c commit() and \c rollback() will have the same effect.
+    ///
+    /// This transaction cannot coexist with an update transaction started
+    /// by \c startUpdateZone().  Such an attempt will result in
+    /// \c DataSourceError.
+    ///
+    /// \exception DataSourceError An attempt of nested transaction, or some
+    /// internal database related error.
+    virtual void startTransaction() = 0;
+
+    /// Commit a transaction.
+    ///
+    /// This method completes a transaction started by \c startTransaction
+    /// or \c startUpdateZone.
+    ///
+    /// A successful call to one of the "start" methods must have preceded
+    /// this call; otherwise a \c DataSourceError exception will be thrown.
+    /// Once this method successfully completes, the transaction isn't
+    /// considered to exist any more.  So a new transaction can now be
+    /// started.  On the other hand, a duplicate call to this method after
+    /// a successful completion of it is invalid and should result in
+    /// a \c DataSourceError exception.
+    ///
+    /// If some internal database error happens, a \c DataSourceError
+    /// exception must be thrown.  In that case the transaction is still
+    /// considered to be valid; the caller must explicitly rollback it
+    /// or (if it's confident that the error is temporary) try to commit it
+    /// again.
+    ///
+    /// \exception DataSourceError Call without a transaction, duplicate call
+    /// to the method or internal database error.
+    virtual void commit() = 0;
+
+    /// Rollback any changes in a transaction made so far.
+    ///
+    /// This method rollbacks a transaction started by \c startTransaction or
+    /// \c startUpdateZone.  When it succeeds (it normally should, but see
+    /// below), the underlying database should be reverted to the point
+    /// before performing the corresponding "start" method.
+    ///
+    /// A successful call to one of the "start" methods must have preceded
+    /// this call; otherwise a \c DataSourceError exception will be thrown.
+    /// Once this method successfully completes, the transaction isn't
+    /// considered to exist any more.  So a new transaction can now be
+    /// started.  On the other hand, a duplicate call to this method after
+    /// a successful completion of it is invalid and should result in
+    /// a \c DataSourceError exception.
+    ///
+    /// Normally this method should not fail.  But it may not always be
+    /// possible to guarantee it depending on the characteristics of the
+    /// underlying database system.  So this interface doesn't require the
+    /// actual implementation for the error free property.  But if a specific
+    /// implementation of this method can fail, it is encouraged to document
+    /// when that can happen with its implication.
+    ///
+    /// \exception DataSourceError Call without a transaction, duplicate call
+    /// to the method or internal database error.
+    virtual void rollback() = 0;
+
+    /// Install a single RR diff in difference sequences for zone update.
+    ///
+    /// This method inserts parameters of an update operation for a single RR
+    /// (either adding or deleting one) in the underlying database.
+    /// (These parameters would normally be a separate database table, but
+    /// actual realization can differ in specific implementations).
+    /// The information given via this method generally corresponds to either
+    /// a single call to \c addRecordToZone() or \c deleteRecordInZone(),
+    /// and this method is expected to be called immediately after (or before)
+    /// a call to either of those methods.
+    ///
+    /// Note, however, that this method passes more detailed information
+    /// than those update methods: it passes "serial", even if the diff
+    /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
+    /// while in \c deleteRecordInZone() it's omitted.  This is because
+    /// the stored diffs are expected to be retrieved in the form that
+    /// \c getDiffs() is expected to meet.  This means if the caller
+    /// wants to use this method with other update operations, it must
+    /// ensure the additional information is ready when this method is called.
+    ///
+    /// The caller of this method must ensure that the added diffs via
+    /// this method in a single transaction form an IXFR-style difference
+    /// sequences: Each difference sequence is a sequence of RRs:
+    /// an older version of SOA (to be deleted), zero or more other deleted
+    /// RRs, the post-transaction SOA (to be added), and zero or more other
+    /// added RRs.  So, for example, the first call to this method in a
+    /// transaction must always be deleting an SOA.  Also, the \c serial
+    /// parameter must be equal to the value of the serial field of the
+    /// SOA that was last added or deleted (if the call is to add or delete
+    /// an SOA RR, \c serial must be identical to the serial of that SOA).
+    /// The underlying derived class implementation may or may not check
+    /// this condition, but if the caller doesn't meet the condition
+    /// a subsequent call to \c getDiffs() will not work as expected.
+    ///
+    /// Any call to this method must be in a transaction, and, for now,
+    /// it must be a transaction triggered by \c startUpdateZone() (that is,
+    /// it cannot be a transaction started by \c startTransaction()).
+    /// All calls to this method are considered to be part of an atomic
+    /// transaction: Until \c commit() is performed, the added diffs are
+    /// not visible outside the transaction; if \c rollback() is performed,
+    /// all added diffs are canceled; and the added sequences are not
+    /// affected by any concurrent attempt of adding diffs (conflict resolution
+    /// is up to the database implementation).
+    ///
+    /// Also for now, all diffs are assumed to be for the zone that is
+    /// being updated in the context of \c startUpdateZone().  So the
+    /// \c zone_id parameter must be identical to the zone ID returned by
+    /// \c startUpdateZone().
+    ///
+    /// In a future version we may loosen this condition so that diffs can be
+    /// added in a generic transaction and may not even have to belong to
+    /// a single zone.  For this possible extension \c zone_id parameter is
+    /// included even if it's redundant under the current restriction.
+    ///
+    /// The support for adding (or retrieving) diffs is optional; if it's
+    /// not supported in a specific data source, this method for the
+    /// corresponding derived class will throw a \c NotImplemented exception.
+    ///
+    /// \exception DataSourceError Invalid call without starting a transaction,
+    /// zone ID doesn't match the zone being updated, or other internal
+    /// database error.
+    /// \exception NotImplemented Adding diffs is not supported in the
+    /// data source.
+    /// \exception Other The concrete derived method may throw other
+    /// data source specific exceptions.
+    ///
+    /// \param zone_id The zone for the diff to be added.
+    /// \param serial The SOA serial to which the diff belongs.
+    /// \param operation Either \c DIFF_ADD or \c DIFF_DELETE.
+    /// \param params An array of strings that defines a record for the diff.
+    virtual void addRecordDiff(
+        int zone_id, uint32_t serial, DiffOperation operation,
+        const std::string (&params)[DIFF_PARAM_COUNT]) = 0;
+
+    /// Clone the accessor with the same configuration.
+    ///
+    /// Each derived class implementation of this method will create a new
+    /// accessor of the same derived class with the same configuration
+    /// (such as the database server address) as that of the caller object
+    /// and return it.
+    ///
+    /// Note that other internal states won't be copied to the new accessor
+    /// even though the name of "clone" may indicate so.  For example, even
+    /// if the calling accessor is in the middle of a update transaction,
+    /// the new accessor will not start a transaction to trace the same
+    /// updates.
+    ///
+    /// The intended use case of cloning is to create a separate context
+    /// where a specific set of database operations can be performed
+    /// independently from the original accessor.  The updater will use it
+    /// so that multiple updaters can be created concurrently even if the
+    /// underlying database system doesn't allow running multiple transactions
+    /// in a single database connection.
+    ///
+    /// The underlying database system may not support the functionality
+    /// that would be needed to implement this method.  For example, it
+    /// may not allow a single thread (or process) to have more than one
+    /// database connections.  In such a case the derived class implementation
+    /// should throw a \c DataSourceError exception.
+    ///
+    /// \return A shared pointer to the cloned accessor.
+    virtual boost::shared_ptr<DatabaseAccessor> clone() = 0;
+
+    /**
+     * \brief Returns a string identifying this database backend
+     *
+     * The returned string is mainly intended to be used for
+     * debugging/logging purposes.
+     *
+     * Any implementation is free to choose the exact string content,
+     * but it is advisable to make it a name that is distinguishable
+     * from the others.
+     *
+     * \return the name of the database
+     */
+    virtual const std::string& getDBName() const = 0;
+
+    /**
+     * \brief It returns the previous name in DNSSEC order.
+     *
+     * This is used in DatabaseClient::findPreviousName and does more
+     * or less the real work, except for working on strings.
+     *
+     * \param rname The name to ask for previous of, in reversed form.
+     *     We use the reversed form (see isc::dns::Name::reverse),
+     *     because then the case insensitive order of string representation
+     *     and the DNSSEC order correspond (eg. org.example.a is followed
+     *     by org.example.a.b which is followed by org.example.b, etc).
+     * \param zone_id The zone to look through.
+     * \return The previous name.
+     * \note This function must return previous name even in case
+     *     the queried rname does not exist in the zone.
+     * \note This method must skip under-the-zone-cut data (glue data).
+     *     This might be implemented by looking for NSEC records (as glue
+     *     data don't have them) in the zone or in some other way.
+     *
+     * \throw DataSourceError if there's a problem with the database.
+     * \throw NotImplemented if this database doesn't support DNSSEC
+     *     or there's no previous name for the queried one (the NSECs
+     *     might be missing or the queried name is less or equal the
+     *     apex of the zone).
+     */
+    virtual std::string findPreviousName(int zone_id,
+                                         const std::string& rname) const = 0;
+};
+
+/**
+ * \brief Concrete data source client oriented at database backends.
+ *
+ * This class (together with corresponding versions of ZoneFinder,
+ * ZoneIterator, etc.) translates high-level data source queries to
+ * low-level calls on DatabaseAccessor. It calls multiple queries
+ * if necessary and validates data from the database, allowing the
+ * DatabaseAccessor to be just simple translation to SQL/other
+ * queries to database.
+ *
+ * While it is possible to subclass it for specific database in case
+ * of special needs, it is not expected to be needed. This should just
+ * work as it is with whatever DatabaseAccessor.
+ */
+class DatabaseClient : public DataSourceClient {
+public:
+    /**
+     * \brief Constructor
+     *
+     * It initializes the client with a database via the given accessor.
+     *
+     * \exception isc::InvalidParameter if accessor is NULL. It might throw
+     * standard allocation exception as well, but doesn't throw anything else.
+     *
+     * \param rrclass The RR class of the zones that this client will handle.
+     * \param accessor The accessor to the database to use to get data.
+     *  As the parameter suggests, the client takes ownership of the accessor
+     *  and will delete it when itself deleted.
+     */
+    DatabaseClient(isc::dns::RRClass rrclass,
+                   boost::shared_ptr<DatabaseAccessor> accessor);
+
+    /**
+     * \brief Corresponding ZoneFinder implementation
+     *
+     * The zone finder implementation for database data sources. Similarly
+     * to the DatabaseClient, it translates the queries to methods of the
+     * database.
+     *
+     * Application should not come directly in contact with this class
+     * (it should handle it through a generic ZoneFinder pointer), therefore
+     * it could be completely hidden in the .cc file. But it is provided
+     * to allow testing and for rare cases when a database needs slightly
+     * different handling, so it can be subclassed.
+     *
+     * Methods directly corresponds to the ones in ZoneFinder.
+     */
+    class Finder : public ZoneFinder {
+    public:
+        /**
+         * \brief Constructor
+         *
+         * \param database The database (shared with DatabaseClient) to
+         *     be used for queries (the one asked for ID before).
+         * \param zone_id The zone ID which was returned from
+         *     DatabaseAccessor::getZone and which will be passed to further
+         *     calls to the database.
+         * \param origin The name of the origin of this zone. It could query
+         *     it from database, but as the DatabaseClient just searched for
+         *     the zone using the name, it should have it.
+         */
+        Finder(boost::shared_ptr<DatabaseAccessor> database, int zone_id,
+               const isc::dns::Name& origin);
+        // The following three methods are just implementations of inherited
+        // ZoneFinder's pure virtual methods.
+        virtual isc::dns::Name getOrigin() const;
+        virtual isc::dns::RRClass getClass() const;
+
+        /**
+         * \brief Find an RRset in the datasource
+         *
+         * Searches the datasource for an RRset of the given name and
+         * type. If there is a CNAME at the given name, the CNAME rrset
+         * is returned.
+         * (this implementation is not complete, and currently only
+         * does full matches, CNAMES, and the signatures for matches and
+         * CNAMEs)
+         * \note target was used in the original design to handle ANY
+         *       queries. This is not implemented yet, and may use
+         *       target again for that, but it might also use something
+         *       different. It is left in for compatibility at the moment.
+         * \note options are ignored at this moment
+         *
+         * \note Maybe counter intuitively, this method is not a const member
+         * function.  This is intentional; some of the underlying implementations
+         * are expected to use a database backend, and would internally contain
+         * some abstraction of "database connection".  In the most strict sense
+         * any (even read only) operation might change the internal state of
+         * such a connection, and in that sense the operation cannot be considered
+         * "const".  In order to avoid giving a false sense of safety to the
+         * caller, we indicate a call to this method may have a surprising
+         * side effect.  That said, this view may be too strict and it may
+         * make sense to say the internal database connection doesn't affect
+         * external behavior in terms of the interface of this method.  As
+         * we gain more experiences with various kinds of backends we may
+         * revisit the constness.
+         *
+         * \exception DataSourceError when there is a problem reading
+         *                            the data from the database backend.
+         *                            This can be a connection, code, or
+         *                            data (parse) error.
+         *
+         * \param name The name to find
+         * \param type The RRType to find
+         * \param target Unused at this moment
+         * \param options Options about how to search.
+         *     See ZoneFinder::FindOptions.
+         */
+        virtual FindResult find(const isc::dns::Name& name,
+                                const isc::dns::RRType& type,
+                                isc::dns::RRsetList* target = NULL,
+                                const FindOptions options = FIND_DEFAULT);
+
+        /**
+         * \brief Implementation of ZoneFinder::findPreviousName method.
+         */
+        virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+            const;
+
+        /**
+         * \brief The zone ID
+         *
+         * This function provides the stored zone ID as passed to the
+         * constructor. This is meant for testing purposes and normal
+         * applications shouldn't need it.
+         */
+        int zone_id() const { return (zone_id_); }
+
+        /**
+         * \brief The database accessor.
+         *
+         * This function provides the database accessor stored inside as
+         * passed to the constructor. This is meant for testing purposes and
+         * normal applications shouldn't need it.
+         */
+        const DatabaseAccessor& getAccessor() const {
+            return (*accessor_);
+        }
+    private:
+        boost::shared_ptr<DatabaseAccessor> accessor_;
+        const int zone_id_;
+        const isc::dns::Name origin_;
+        //
+        /// \brief Shortcut name for the result of getRRsets
+        typedef std::pair<bool, std::map<dns::RRType, dns::RRsetPtr> >
+            FoundRRsets;
+        /// \brief Just shortcut for set of types
+        typedef std::set<dns::RRType> WantedTypes;
+        /**
+         * \brief Searches database for RRsets of one domain.
+         *
+         * This method scans RRs of single domain specified by name and
+         * extracts any RRsets found and requested by parameters.
+         *
+         * It is used internally by find(), because it is called multiple
+         * times (usually with different domains).
+         *
+         * \param name Which domain name should be scanned.
+         * \param types List of types the caller is interested in.
+         * \param check_ns If this is set to true, it checks nothing lives
+         *     together with NS record (with few little exceptions, like RRSIG
+         *     or NSEC). This check is meant for non-apex NS records.
+         * \param construct_name If this is NULL, the resulting RRsets have
+         *     their name set to name. If it is not NULL, it overrides the name
+         *     and uses this one (this can be used for wildcard synthesized
+         *     records).
+         * \return A pair, where the first element indicates if the domain
+         *     contains any RRs at all (not only the requested, it may happen
+         *     this is set to true, but the second part is empty). The second
+         *     part is map from RRtypes to RRsets of the corresponding types.
+         *     If the RRset is not present in DB, the RRtype is not there at
+         *     all (so you'll not find NULL pointer in the result).
+         * \throw DataSourceError If there's a low-level error with the
+         *     database or the database contains bad data.
+         */
+        FoundRRsets getRRsets(const std::string& name,
+                              const WantedTypes& types, bool check_ns,
+                              const std::string* construct_name = NULL);
+        /**
+         * \brief Checks if something lives below this domain.
+         *
+         * This looks if there's any subdomain of the given name. It can be
+         * used to test if the domain is an empty non-terminal.
+         *
+         * \param name The domain to check.
+         */
+        bool hasSubdomains(const std::string& name);
+
+        /**
+         * \brief Get the NSEC covering a name.
+         *
+         * This one calls findPreviousName on the given name and extracts an NSEC
+         * record on the result. It handles various error cases. The method exists
+         * to share code present at more than one location.
+         */
+        dns::RRsetPtr findNSECCover(const dns::Name& name);
+
+        /**
+         * \brief Convenience type shortcut.
+         *
+         * To find stuff in the result of getRRsets.
+         */
+        typedef std::map<dns::RRType, dns::RRsetPtr>::const_iterator
+            FoundIterator;
+    };
+
+    /**
+     * \brief Find a zone in the database
+     *
+     * This queries database's getZone to find the best matching zone.
+     * It will propagate whatever exceptions are thrown from that method
+     * (which is not restricted in any way).
+     *
+     * \param name Name of the zone or data contained there.
+     * \return FindResult containing the code and an instance of Finder, if
+     *     anything is found. However, application should not rely on the
+     *     ZoneFinder being instance of Finder (possible subclass of this class
+     *     may return something else and it may change in future versions), it
+     *     should use it as a ZoneFinder only.
+     */
+    virtual FindResult findZone(const isc::dns::Name& name) const;
+
+    /**
+     * \brief Get the zone iterator
+     *
+     * The iterator allows going through the whole zone content. If the
+     * underlying DatabaseConnection is implemented correctly, it should
+     * be possible to have multiple ZoneIterators at once and query data
+     * at the same time.
+     *
+     * \exception DataSourceError if the zone doesn't exist.
+     * \exception isc::NotImplemented if the underlying DatabaseConnection
+     *     doesn't implement iteration. But in case it is not implemented
+     *     and the zone doesn't exist, DataSourceError is thrown.
+     * \exception Anything else the underlying DatabaseConnection might
+     *     want to throw.
+     * \param name The origin of the zone to iterate.
+     * \param separate_rrs If true, the iterator will return each RR as a
+     *                     new RRset object. If false, the iterator will
+     *                     combine consecutive RRs with the name and type
+     *                     into 1 RRset. The capitalization of the RRset will
+     *                     be that of the first RR read, and TTLs will be
+     *                     adjusted to the lowest one found.
+     * \return Shared pointer to the iterator (it will never be NULL)
+     */
+    virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+                                        bool separate_rrs = false) const;
+
+    /// This implementation internally clones the accessor from the one
+    /// used in the client and starts a separate transaction using the cloned
+    /// accessor.  The returned updater will be able to work separately from
+    /// the original client.
+    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+                                      bool replace,
+                                      bool journaling = false) const;
+
+
+    /// This implementation internally clones the accessor from the one
+    /// used in the client for retrieving diffs and iterating over them.
+    /// The returned reader object will be able to work separately from
+    /// the original client.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
+
+private:
+    /// \brief The RR class that this client handles.
+    const isc::dns::RRClass rrclass_;
+
+    /// \brief The accessor to our database.
+    const boost::shared_ptr<DatabaseAccessor> accessor_;
+};
+
+}
+}
+
+#endif  // __DATABASE_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/datasrc_config.h.pre.in b/src/lib/datasrc/datasrc_config.h.pre.in
new file mode 100644
index 0000000..ff99601
--- /dev/null
+++ b/src/lib/datasrc/datasrc_config.h.pre.in
@@ -0,0 +1,31 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_CONFIG_H
+#define __DATASRC_CONFIG_H 1
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Default directory to find the loadable data source libraries
+///
+/// This is the directory where, once installed, loadable backend libraries
+/// such as memory_ds.so and sqlite3_ds.so are found. It is used by the
+/// DataSourceClient loader if no absolute path is used and
+/// B10_FROM_BUILD is not set in the environment.
+const char* const BACKEND_LIBRARY_PATH = "@@PKGLIBEXECDIR@@/";
+
+} // end namespace datasrc
+} // end namespace isc
+
+#endif // __DATASRC_CONFIG_H
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index 3dc69e0..b4d0df7 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -63,6 +63,92 @@ The maximum allowed number of items of the hotspot cache is set to the given
 number. If there are too many, some of them will be dropped. The size of 0
 means no limit.
 
+% DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
+
+% DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3
+Debug information. The database data source is looking up records with the given
+name and type in the database.
+
+% DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5
+The datasource backend provided resource records for the given RRset with
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+
+% DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1
+When searching for a domain, the program met a delegation to a different zone
+at the given domain name. It will return that one instead.
+
+% DATASRC_DATABASE_FOUND_DELEGATION_EXACT Found delegation at %2 (exact match) in %1
+The program found the domain requested, but it is a delegation point to a
+different zone, therefore it is not authoritative for this domain name.
+It will return the NS record instead.
+
+% DATASRC_DATABASE_FOUND_DNAME Found DNAME at %2 in %1
+When searching for a domain, the program met a DNAME redirection to a different
+place in the domain space at the given domain name. It will return that one
+instead.
+
+% DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
+
+% DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4
+The data returned by the database backend did not contain any data for the given
+domain name, class and type.
+
+% DATASRC_DATABASE_FOUND_NXRRSET search in datasource %1 resulted in NXRRSET for %2/%3/%4
+The data returned by the database backend contained data for the given domain
+name and class, but not for the given type.
+
+% DATASRC_DATABASE_FOUND_RRSET search in datasource %1 resulted in RRset %2
+The data returned by the database backend contained data for the given domain
+name, and it either matches the type or has a relevant type. The RRset that is
+returned is printed.
+
+% DATASRC_DATABASE_ITERATE iterating zone %1
+The program is reading the whole zone, i.e. not searching for data, but going
+through each of the RRsets there.
+
+% DATASRC_DATABASE_ITERATE_END iterating zone finished
+While iterating through the zone, the program reached end of the data.
+
+% DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2
+While iterating through the zone, the program extracted next RRset from it.
+The name and RRtype of the RRset is indicated in the message.
+
+% DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4
+While iterating through the zone, the time to live for RRs of the given RRset
+were found to be different. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in database should be checked and fixed.
+
+% DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1
+The database doesn't contain directly matching domain, but it does contain a
+wildcard one which is being used to synthesize the answer.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1
+The database was queried to provide glue data and it didn't find direct match.
+It could create it from given wildcard, but matching wildcards is forbidden
+under a zone cut, which was found. Therefore the delegation will be returned
+instead.
+
+% DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1
+The answer could be constructed using the wildcard, but the given subdomain
+exists, therefore this name is something like empty non-terminal (actually,
+from the protocol point of view, it is empty non-terminal, but the code
+discovers it differently).
+
+% DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1
+The given wildcard exists implicitly in the domain space, as an empty non-terminal
+(eg. there's something like subdomain.*.example.org, so *.example.org exists
+implicitly, but is empty). This will produce NXRRSET, because the constructed
+domain is empty as well as the wildcard.
+
 % DATASRC_DO_QUERY handling query for '%1/%2'
 A debug message indicating that a query for the given name and RR type is being
 processed.
@@ -400,12 +486,22 @@ enough information for it.  The code is 1 for error, 2 for not implemented.
 
 % DATASRC_SQLITE_CLOSE closing SQLite database
 Debug information. The SQLite data source is closing the database file.
+
+% DATASRC_SQLITE_CONNOPEN Opening sqlite database file '%1'
+The database file is being opened so it can start providing data.
+
+% DATASRC_SQLITE_CONNCLOSE Closing sqlite database
+The database file is no longer needed and is being closed.
+
 % DATASRC_SQLITE_CREATE SQLite data source created
 Debug information. An instance of SQLite data source is being created.
 
 % DATASRC_SQLITE_DESTROY SQLite data source destroyed
 Debug information. An instance of SQLite data source is being destroyed.
 
+% DATASRC_SQLITE_DROPCONN SQLite3Database is being deinitialized
+The object around a database connection is being destroyed.
+
 % DATASRC_SQLITE_ENCLOSURE looking for zone containing '%1'
 Debug information. The SQLite data source is trying to identify which zone
 should hold this domain.
@@ -458,6 +554,9 @@ source.
 The SQLite data source was asked to provide a NSEC3 record for given zone.
 But it doesn't contain that zone.
 
+% DATASRC_SQLITE_NEWCONN SQLite3Database is being initialized
+A wrapper object to hold database connection is being initialized.
+
 % DATASRC_SQLITE_OPEN opening SQLite database '%1'
 Debug information. The SQLite data source is loading an SQLite database in
 the provided file.
@@ -497,3 +596,65 @@ data source.
 This indicates a programming error. An internal task of unknown type was
 generated.
 
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information.  A zone updater object is destroyed, either successfully
+or after failure of, making updates to the shown zone on the shown backend
+database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but may also be a bug of the application that forgets committing
+the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or
+software bug in the underlying data source implementation.  In either
+case if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happens and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+
+% DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+
+% DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
new file mode 100644
index 0000000..35a79fe
--- /dev/null
+++ b/src/lib/datasrc/factory.cc
@@ -0,0 +1,144 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "factory.h"
+
+#include "data_source.h"
+#include "database.h"
+#include "sqlite3_accessor.h"
+#include "memory_datasrc.h"
+
+#include "datasrc_config.h"
+
+#include <datasrc/logger.h>
+
+#include <dlfcn.h>
+#include <cstdlib>
+
+using namespace std;
+using namespace isc::data;
+using namespace isc::datasrc;
+
+namespace {
+// This helper function takes the 'type' string as passed to
+// the DataSourceClient container below, and, unless it
+// already specifies a specific loadable .so file, will
+// convert the short-name to the full file.
+// I.e. it will add '_ds.so' (if necessary), and prepend
+// it with an absolute path (if necessary).
+// Returns the resulting string to use with LibraryContainer.
+const std::string
+getDataSourceLibFile(const std::string& type) {
+    if (type.empty()) {
+        isc_throw(DataSourceLibraryError,
+                  "DataSourceClient container called with empty type value");
+    }
+    if (type == ".so") {
+        isc_throw(DataSourceLibraryError, "DataSourceClient container called "
+                                          "with bad type or file name");
+    }
+
+    // Type can be either a short name, in which case we need to
+    // append "_ds.so", or it can be a direct .so library.
+    std::string lib_file = type;
+    const int ext_pos = lib_file.rfind(".so");
+    if (ext_pos == std::string::npos || ext_pos + 3 != lib_file.length()) {
+        lib_file.append("_ds.so");
+    }
+    // And if it is not an absolute path, prepend it with our
+    // loadable backend library path
+    if (type[0] != '/') {
+        // When running from the build tree, we do NOT want
+        // to load the installed loadable library
+        if (getenv("B10_FROM_BUILD") != NULL) {
+            lib_file = std::string(getenv("B10_FROM_BUILD")) +
+                       "/src/lib/datasrc/.libs/" + lib_file;
+        } else {
+            lib_file = isc::datasrc::BACKEND_LIBRARY_PATH + lib_file;
+        }
+    }
+    return (lib_file);
+}
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+
+LibraryContainer::LibraryContainer(const std::string& name) {
+    // use RTLD_GLOBAL so that shared symbols (e.g. exceptions)
+    // are recognized as such
+    ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
+    if (ds_lib_ == NULL) {
+        // This may cause the filename to appear twice in the actual
+        // error, but the output of dlerror is implementation-dependent
+        isc_throw(DataSourceLibraryError, "dlopen failed for " << name << 
+                                          ": " << dlerror());
+    }
+}
+
+LibraryContainer::~LibraryContainer() {
+    dlclose(ds_lib_);
+}
+
+void*
+LibraryContainer::getSym(const char* name) {
+    // Since dlsym can return NULL on success, we check for errors by
+    // first clearing any existing errors with dlerror(), then calling dlsym,
+    // and finally checking for errors with dlerror()
+    dlerror();
+
+    void *sym = dlsym(ds_lib_, name);
+
+    const char* dlsym_error = dlerror();
+    if (dlsym_error != NULL) {
+        isc_throw(DataSourceLibrarySymbolError, dlsym_error);
+    }
+
+    return (sym);
+}
+
+DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
+                                                     ConstElementPtr config)
+: ds_lib_(getDataSourceLibFile(type))
+{
+    // We are casting from a data to a function pointer here
+    // Some compilers (rightfully) complain about that, but
+    // c-style casts are accepted the most here. If we run
+    // into any that also don't like this, we might need to
+    // use some form of union cast or memory copy to get
+    // from the void* to the function pointer.
+    ds_creator* ds_create = (ds_creator*)ds_lib_.getSym("createInstance");
+    destructor_ = (ds_destructor*)ds_lib_.getSym("destroyInstance");
+
+    std::string error;
+    try {
+        instance_ = ds_create(config, error);
+        if (instance_ == NULL) {
+            isc_throw(DataSourceError, error);
+        }
+    } catch (const std::exception& exc) {
+        isc_throw(DataSourceError, "Unknown uncaught exception from " + type +
+                                   " createInstance: " + exc.what());
+    } catch (...) {
+        isc_throw(DataSourceError, "Unknown uncaught exception from " + type);
+    }
+}
+
+DataSourceClientContainer::~DataSourceClientContainer() {
+    destructor_(instance_);
+}
+
+} // end namespace datasrc
+} // end namespace isc
+
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
new file mode 100644
index 0000000..9d0a762
--- /dev/null
+++ b/src/lib/datasrc/factory.h
@@ -0,0 +1,179 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATA_SOURCE_FACTORY_H
+#define __DATA_SOURCE_FACTORY_H 1
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
+#include <exceptions/exceptions.h>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace datasrc {
+
+
+/// \brief Raised if there is an error loading the datasource implementation
+///        library
+class DataSourceLibraryError : public DataSourceError {
+public:
+    DataSourceLibraryError(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+/// \brief Raised if there is an error reading a symbol from the datasource
+///        implementation library
+class DataSourceLibrarySymbolError : public DataSourceError {
+public:
+    DataSourceLibrarySymbolError(const char* file, size_t line,
+                                 const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+typedef DataSourceClient* ds_creator(isc::data::ConstElementPtr config,
+                                     std::string& error);
+typedef void ds_destructor(DataSourceClient* instance);
+
+/// \brief Container class for dynamically loaded libraries
+///
+/// This class is used to dlopen() a library, provides access to dlsym(),
+/// and cleans up the dlopened library when the instance of this class is
+/// destroyed.
+///
+/// Its main function is to provide RAII-style access to dlopen'ed libraries.
+///
+/// \note Currently it is Datasource-backend specific. If we have need for this
+///       in other places than for dynamically loading datasources, then, apart
+///       from moving it to another location, we also need to make the
+///       exceptions raised more general.
+class LibraryContainer : boost::noncopyable {
+public:
+    /// \brief Constructor
+    ///
+    /// \param name The name of the library (.so) file. This file must be in
+    ///             the library path.
+    ///
+    /// \exception DataSourceLibraryError If the library cannot be found or
+    ///            cannot be loaded, or if name is an empty string.
+    LibraryContainer(const std::string& name);
+
+    /// \brief Destructor
+    ///
+    /// Cleans up the library by calling dlclose()
+    ~LibraryContainer();
+
+    /// \brief Retrieve a symbol
+    ///
+    /// This retrieves a symbol from the loaded library.
+    ///
+    /// \exception DataSourceLibrarySymbolError if the symbol cannot be found,
+    ///            or if another error (as reported by dlerror()) occurs.
+    ///
+    /// \param name The name of the symbol to retrieve
+    /// \return A pointer to the symbol. This may be NULL, and if so, indicates
+    ///         the symbol does indeed exist, but has the value NULL itself.
+    ///         If the symbol does not exist, a DataSourceLibrarySymbolError is
+    ///         raised.
+    ///
+    /// \note The argument is a const char* (and not a std::string like the
+    ///       argument in the constructor). This argument is always a fixed
+    ///       string in the code, while the other can be read from
+    ///       configuration, and needs modification
+    void* getSym(const char* name);
+private:
+    /// Pointer to the dynamically loaded library structure
+    void *ds_lib_;
+};
+
+
+/// \brief Container for a specific instance of a dynamically loaded
+///        DataSourceClient implementation
+///
+/// Given a datasource type and a type-specific set of configuration data,
+/// the corresponding dynamic library is loaded (if it hadn't been already),
+/// and an instance is created. This instance is stored within this structure,
+/// and can be accessed through getInstance(). Upon destruction of this
+/// container, the stored instance of the DataSourceClient is deleted with
+/// the destructor function provided by the loaded library.
+///
+/// The 'type' is actually the name of the library, minus the '_ds.so' postfix
+/// Datasource implementation libraries therefore have a fixed name, both for
+/// easy recognition and to reduce potential mistakes.
+/// For example, the sqlite3 implementation has the type 'sqlite3', and the
+/// derived filename 'sqlite3_ds.so'
+/// The value of type can be a specific loadable library; if it already ends
+/// with '.so', the loader will not add '_ds.so'.
+/// It may also be an absolute path; if it starts with '/', nothing is
+/// prepended. If it does not, the loadable library will be taken from the
+/// installation directory, see the value of
+/// isc::datasrc::BACKEND_LIBRARY_PATH in datasrc_config.h for the exact path.
+///
+/// \note When 'B10_FROM_BUILD' is set in the environment, the build
+///       directory is used instead of the install directory.
+///
+/// There are of course some demands to an implementation, not all of which
+/// can be verified compile-time. It must provide a creator and destructor
+/// functions. The creator function must return an instance of a subclass of
+/// DataSourceClient. The prototypes of these functions are as follows:
+/// \code
+/// extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr cfg);
+///
+/// extern "C" void destroyInstance(isc::data::DataSourceClient* instance);
+/// \endcode
+class DataSourceClientContainer : boost::noncopyable {
+public:
+    /// \brief Constructor
+    ///
+    /// \exception DataSourceLibraryError if there is an error loading the
+    ///            backend library
+    /// \exception DataSourceLibrarySymbolError if the library does not have
+    ///            the needed symbols, or if there is an error reading them
+    /// \exception DataError if the given config is not correct
+    ///            for the given type, or if there was a problem during
+    ///            initialization
+    ///
+    /// \param type The type of the datasource client. Based on the value of
+    ///             type, a specific backend library is used, by appending the
+    ///             string '_ds.so' to the given type, and loading that as the
+    ///             implementation library
+    /// \param config Type-specific configuration data, see the documentation
+    ///               of the datasource backend type for information on what
+    ///               configuration data to pass.
+    DataSourceClientContainer(const std::string& type,
+                              isc::data::ConstElementPtr config);
+
+    /// \brief Destructor
+    ~DataSourceClientContainer();
+
+    /// \brief Accessor to the instance
+    ///
+    /// \return Reference to the DataSourceClient instance contained in this
+    ///         container.
+    DataSourceClient& getInstance() { return *instance_; }
+
+private:
+    DataSourceClient* instance_;
+    ds_destructor* destructor_;
+    LibraryContainer ds_lib_;
+};
+
+} // end namespace datasrc
+} // end namespace isc
+#endif  // __DATA_SOURCE_FACTORY_H
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/iterator.h b/src/lib/datasrc/iterator.h
new file mode 100644
index 0000000..99d3331
--- /dev/null
+++ b/src/lib/datasrc/iterator.h
@@ -0,0 +1,105 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DATASRC_ZONE_ITERATOR_H
+#define __DATASRC_ZONE_ITERATOR_H 1
+
+#include <dns/rrset.h>
+
+#include <boost/noncopyable.hpp>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+
+/**
+ * \brief Read-only iterator to a zone.
+ *
+ * You can get an instance of (descendant of) ZoneIterator from
+ * DataSourceClient::getIterator() method. The actual concrete implementation
+ * will be different depending on the actual data source used. This is the
+ * abstract interface.
+ *
+ * There's no way to start iterating from the beginning again or return.
+ */
+class ZoneIterator : public boost::noncopyable {
+public:
+    /**
+     * \brief Destructor
+     *
+     * Virtual destructor. It is empty, but ensures the right destructor from
+     * descendant is called.
+     */
+    virtual ~ ZoneIterator() { }
+
+    /**
+     * \brief Get next RRset from the zone.
+     *
+     * This returns the next RRset in the zone as a shared pointer. The
+     * shared pointer is used to allow both accessing in-memory data and
+     * automatic memory management.
+     *
+     * Any special order is not guaranteed.
+     *
+     * While this can potentially throw anything (including standard allocation
+     * errors), it should be rare.
+     *
+     * \return Pointer to the next RRset or NULL pointer when the iteration
+     *     gets to the end of the zone.
+     */
+    virtual isc::dns::ConstRRsetPtr getNextRRset() = 0;
+
+    /**
+     * \brief Return the SOA record of the zone in the iterator context.
+     *
+     * This method returns the zone's SOA record (if any, and a valid zone
+     * should have it) in the form of an RRset object.  This SOA is identical
+     * to that (again, if any) contained in the sequence of RRsets returned
+     * by the iterator.  In that sense this method is redundant, but is
+     * provided as a convenient utility for the application of the
+     * iterator; the application may need to know the SOA serial or the
+     * SOA RR itself for the purpose of protocol handling or skipping the
+     * expensive iteration processing.
+     *
+     * If the zone doesn't have an SOA (which is broken, but some data source
+     * may allow that situation), this method returns NULL.  Also, in the
+     * normal and valid case, the SOA should have exactly one RDATA, but
+     * this API does not guarantee it as some data source may accept such an
+     * abnormal condition.  It's up to the caller whether to check the number
+     * of RDATA and how to react to the unexpected case.
+     *
+     * Each concrete derived method must ensure that the SOA returned by this
+     * method is identical to the zone's SOA returned via the iteration.
+     * For example, even if another thread or process updates the SOA while
+     * the iterator is working, the result of this method must not be
+     * affected by the update.  For database based data sources, this can
+     * be done by making the entire iterator operation as a single database
+     * transaction, but the actual implementation can differ.
+     *
+     * \exception None
+     *
+     * \return A shared pointer to an SOA RRset that would be returned
+     * from the iteration.  It will be NULL if the zone doesn't have an SOA.
+     */
+    virtual isc::dns::ConstRRsetPtr getSOA() const = 0;
+};
+
+}
+}
+#endif  // __DATASRC_ZONE_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/logger.h b/src/lib/datasrc/logger.h
index ac5d50b..db4e5cb 100644
--- a/src/lib/datasrc/logger.h
+++ b/src/lib/datasrc/logger.h
@@ -18,7 +18,7 @@
 #include <log/macros.h>
 #include <datasrc/datasrc_messages.h>
 
-/// \file logger.h
+/// \file datasrc/logger.h
 /// \brief Data Source library global logger
 ///
 /// This holds the logger for the data source library. It is a private header
@@ -31,14 +31,14 @@ namespace datasrc {
 /// \brief The logger for this library
 extern isc::log::Logger logger;
 
-enum {
-    /// \brief Trace basic operations
-    DBG_TRACE_BASIC = 10,
-    /// \brief Trace data changes and lookups as well
-    DBG_TRACE_DATA = 20,
-    /// \brief Detailed even about how the lookups happen
-    DBG_TRACE_DETAILED = 50
-};
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Trace data changes and lookups as well
+const int DBG_TRACE_DATA = DBGLVL_TRACE_BASIC_DATA;
+
+/// \brief Detailed events about how the lookups happen
+const int DBG_TRACE_DETAILED = DBGLVL_TRACE_DETAIL;
 
 }
 }
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 6565000..a79ee5b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -16,6 +16,9 @@
 #include <cassert>
 #include <boost/shared_ptr.hpp>
 #include <boost/bind.hpp>
+#include <boost/foreach.hpp>
+
+#include <exceptions/exceptions.h>
 
 #include <dns/name.h>
 #include <dns/rrclass.h>
@@ -25,17 +28,44 @@
 #include <datasrc/memory_datasrc.h>
 #include <datasrc/rbtree.h>
 #include <datasrc/logger.h>
+#include <datasrc/iterator.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+
+#include <cc/data.h>
 
 using namespace std;
 using namespace isc::dns;
+using namespace isc::data;
 
 namespace isc {
 namespace datasrc {
 
-// Private data and hidden methods of MemoryZone
-struct MemoryZone::MemoryZoneImpl {
+namespace {
+// Some type aliases
+/*
+ * Each domain consists of some RRsets. They will be looked up by the
+ * RRType.
+ *
+ * The use of map is questionable with regard to performance - there'll
+ * be usually only few RRsets in the domain, so the log n benefit isn't
+ * much and a vector/array might be faster due to its simplicity and
+ * continuous memory location. But this is unlikely to be a performance
+ * critical place and map has better interface for the lookups, so we use
+ * that.
+ */
+typedef map<RRType, ConstRRsetPtr> Domain;
+typedef Domain::value_type DomainPair;
+typedef boost::shared_ptr<Domain> DomainPtr;
+// The tree stores domains
+typedef RBTree<Domain> DomainTree;
+typedef RBNode<Domain> DomainNode;
+}
+
+// Private data and hidden methods of InMemoryZoneFinder
+struct InMemoryZoneFinder::InMemoryZoneFinderImpl {
     // Constructor
-    MemoryZoneImpl(const RRClass& zone_class, const Name& origin) :
+    InMemoryZoneFinderImpl(const RRClass& zone_class, const Name& origin) :
         zone_class_(zone_class), origin_(origin), origin_data_(NULL),
         domains_(true)
     {
@@ -44,25 +74,6 @@ struct MemoryZone::MemoryZoneImpl {
         DomainPtr origin_domain(new Domain);
         origin_data_->setData(origin_domain);
     }
-
-    // Some type aliases
-    /*
-     * Each domain consists of some RRsets. They will be looked up by the
-     * RRType.
-     *
-     * The use of map is questionable with regard to performance - there'll
-     * be usually only few RRsets in the domain, so the log n benefit isn't
-     * much and a vector/array might be faster due to its simplicity and
-     * continuous memory location. But this is unlikely to be a performance
-     * critical place and map has better interface for the lookups, so we use
-     * that.
-     */
-    typedef map<RRType, ConstRRsetPtr> Domain;
-    typedef Domain::value_type DomainPair;
-    typedef boost::shared_ptr<Domain> DomainPtr;
-    // The tree stores domains
-    typedef RBTree<Domain> DomainTree;
-    typedef RBNode<Domain> DomainNode;
     static const DomainNode::Flags DOMAINFLAG_WILD = DomainNode::FLAG_USER1;
 
     // Information about the zone
@@ -223,7 +234,7 @@ struct MemoryZone::MemoryZoneImpl {
      * Implementation of longer methods. We put them here, because the
      * access is without the impl_-> and it will get inlined anyway.
      */
-    // Implementation of MemoryZone::add
+    // Implementation of InMemoryZoneFinder::add
     result::Result add(const ConstRRsetPtr& rrset, DomainTree* domains) {
         // Sanitize input.  This will cause an exception to be thrown
         // if the input RRset is empty.
@@ -409,7 +420,7 @@ struct MemoryZone::MemoryZoneImpl {
         }
     }
 
-    // Implementation of MemoryZone::find
+    // Implementation of InMemoryZoneFinder::find
     FindResult find(const Name& name, RRType type,
                     RRsetList* target, const FindOptions options) const
     {
@@ -593,50 +604,50 @@ struct MemoryZone::MemoryZoneImpl {
     }
 };
 
-MemoryZone::MemoryZone(const RRClass& zone_class, const Name& origin) :
-    impl_(new MemoryZoneImpl(zone_class, origin))
+InMemoryZoneFinder::InMemoryZoneFinder(const RRClass& zone_class, const Name& origin) :
+    impl_(new InMemoryZoneFinderImpl(zone_class, origin))
 {
     LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_CREATE).arg(origin).
         arg(zone_class);
 }
 
-MemoryZone::~MemoryZone() {
+InMemoryZoneFinder::~InMemoryZoneFinder() {
     LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_DESTROY).arg(getOrigin()).
         arg(getClass());
     delete impl_;
 }
 
-const Name&
-MemoryZone::getOrigin() const {
+Name
+InMemoryZoneFinder::getOrigin() const {
     return (impl_->origin_);
 }
 
-const RRClass&
-MemoryZone::getClass() const {
+RRClass
+InMemoryZoneFinder::getClass() const {
     return (impl_->zone_class_);
 }
 
-Zone::FindResult
-MemoryZone::find(const Name& name, const RRType& type,
-                 RRsetList* target, const FindOptions options) const
+ZoneFinder::FindResult
+InMemoryZoneFinder::find(const Name& name, const RRType& type,
+                 RRsetList* target, const FindOptions options)
 {
     return (impl_->find(name, type, target, options));
 }
 
 result::Result
-MemoryZone::add(const ConstRRsetPtr& rrset) {
+InMemoryZoneFinder::add(const ConstRRsetPtr& rrset) {
     return (impl_->add(rrset, &impl_->domains_));
 }
 
 
 void
-MemoryZone::load(const string& filename) {
+InMemoryZoneFinder::load(const string& filename) {
     LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_LOAD).arg(getOrigin()).
         arg(filename);
     // Load it into a temporary tree
-    MemoryZoneImpl::DomainTree tmp;
+    DomainTree tmp;
     masterLoad(filename.c_str(), getOrigin(), getClass(),
-        boost::bind(&MemoryZoneImpl::addFromLoad, impl_, _1, &tmp));
+        boost::bind(&InMemoryZoneFinderImpl::addFromLoad, impl_, _1, &tmp));
     // If it went well, put it inside
     impl_->file_name_ = filename;
     tmp.swap(impl_->domains_);
@@ -644,64 +655,350 @@ MemoryZone::load(const string& filename) {
 }
 
 void
-MemoryZone::swap(MemoryZone& zone) {
+InMemoryZoneFinder::swap(InMemoryZoneFinder& zone_finder) {
     LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_SWAP).arg(getOrigin()).
-        arg(zone.getOrigin());
-    std::swap(impl_, zone.impl_);
+        arg(zone_finder.getOrigin());
+    std::swap(impl_, zone_finder.impl_);
 }
 
 const string
-MemoryZone::getFileName() const {
+InMemoryZoneFinder::getFileName() const {
     return (impl_->file_name_);
 }
 
-/// Implementation details for \c MemoryDataSrc hidden from the public
+isc::dns::Name
+InMemoryZoneFinder::findPreviousName(const isc::dns::Name&) const {
+    isc_throw(NotImplemented, "InMemory data source doesn't support DNSSEC "
+              "yet, can't find previous name");
+}
+
+/// Implementation details for \c InMemoryClient hidden from the public
 /// interface.
 ///
-/// For now, \c MemoryDataSrc only contains a \c ZoneTable object, which
-/// consists of (pointers to) \c MemoryZone objects, we may add more
+/// For now, \c InMemoryClient only contains a \c ZoneTable object, which
+/// consists of (pointers to) \c InMemoryZoneFinder objects, we may add more
 /// member variables later for new features.
-class MemoryDataSrc::MemoryDataSrcImpl {
+class InMemoryClient::InMemoryClientImpl {
 public:
-    MemoryDataSrcImpl() : zone_count(0) {}
+    InMemoryClientImpl() : zone_count(0) {}
     unsigned int zone_count;
     ZoneTable zone_table;
 };
 
-MemoryDataSrc::MemoryDataSrc() : impl_(new MemoryDataSrcImpl)
+InMemoryClient::InMemoryClient() : impl_(new InMemoryClientImpl)
 {}
 
-MemoryDataSrc::~MemoryDataSrc() {
+InMemoryClient::~InMemoryClient() {
     delete impl_;
 }
 
 unsigned int
-MemoryDataSrc::getZoneCount() const {
+InMemoryClient::getZoneCount() const {
     return (impl_->zone_count);
 }
 
 result::Result
-MemoryDataSrc::addZone(ZonePtr zone) {
-    if (!zone) {
+InMemoryClient::addZone(ZoneFinderPtr zone_finder) {
+    if (!zone_finder) {
         isc_throw(InvalidParameter,
-                  "Null pointer is passed to MemoryDataSrc::addZone()");
+                  "Null pointer is passed to InMemoryClient::addZone()");
     }
 
     LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_MEM_ADD_ZONE).
-        arg(zone->getOrigin()).arg(zone->getClass().toText());
+        arg(zone_finder->getOrigin()).arg(zone_finder->getClass().toText());
 
-    const result::Result result = impl_->zone_table.addZone(zone);
+    const result::Result result = impl_->zone_table.addZone(zone_finder);
     if (result == result::SUCCESS) {
         ++impl_->zone_count;
     }
     return (result);
 }
 
-MemoryDataSrc::FindResult
-MemoryDataSrc::findZone(const isc::dns::Name& name) const {
+InMemoryClient::FindResult
+InMemoryClient::findZone(const isc::dns::Name& name) const {
     LOG_DEBUG(logger, DBG_TRACE_DATA, DATASRC_MEM_FIND_ZONE).arg(name);
-    return (FindResult(impl_->zone_table.findZone(name).code,
-                       impl_->zone_table.findZone(name).zone));
+    ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+    return (FindResult(result.code, result.zone));
+}
+
+namespace {
+
+class MemoryIterator : public ZoneIterator {
+private:
+    RBTreeNodeChain<Domain> chain_;
+    Domain::const_iterator dom_iterator_;
+    const DomainTree& tree_;
+    const DomainNode* node_;
+    // Only used when separate_rrs_ is true
+    RdataIteratorPtr rdata_iterator_;
+    bool separate_rrs_;
+    bool ready_;
+public:
+    MemoryIterator(const DomainTree& tree, const Name& origin, bool separate_rrs) :
+        tree_(tree),
+        separate_rrs_(separate_rrs),
+        ready_(true)
+    {
+        // Find the first node (origin) and preserve the node chain for future
+        // searches
+        DomainTree::Result result(tree_.find<void*>(origin, &node_, chain_,
+                                                    NULL, NULL));
+        // It can't happen that the origin is not in there
+        if (result != DomainTree::EXACTMATCH) {
+            isc_throw(Unexpected,
+                      "In-memory zone corrupted, missing origin node");
+        }
+        // Initialize the iterator if there's somewhere to point to
+        if (node_ != NULL && node_->getData() != DomainPtr()) {
+            dom_iterator_ = node_->getData()->begin();
+            if (separate_rrs_ && dom_iterator_ != node_->getData()->end()) {
+                rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+            }
+        }
+    }
+
+    virtual ConstRRsetPtr getNextRRset() {
+        if (!ready_) {
+            isc_throw(Unexpected, "Iterating past the zone end");
+        }
+        /*
+         * This cycle finds the first nonempty node with yet unused RRset.
+         * If it is NULL, we run out of nodes. If it is empty, it doesn't
+         * contain any RRsets. If we are at the end, just get to next one.
+         */
+        while (node_ != NULL && (node_->getData() == DomainPtr() ||
+                                 dom_iterator_ == node_->getData()->end())) {
+            node_ = tree_.nextNode(chain_);
+            // If there's a node, initialize the iterator and check next time
+            // if the map is empty or not
+            if (node_ != NULL && node_->getData() != NULL) {
+                dom_iterator_ = node_->getData()->begin();
+                // New RRset, so get a new rdata iterator
+                if (separate_rrs_) {
+                    rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+                }
+            }
+        }
+        if (node_ == NULL) {
+            // That's all, folks
+            ready_ = false;
+            return (ConstRRsetPtr());
+        }
+
+        if (separate_rrs_) {
+            // For separate rrs, reconstruct a new RRset with just the
+            // 'current' rdata
+            RRsetPtr result(new RRset(dom_iterator_->second->getName(),
+                                      dom_iterator_->second->getClass(),
+                                      dom_iterator_->second->getType(),
+                                      dom_iterator_->second->getTTL()));
+            result->addRdata(rdata_iterator_->getCurrent());
+            rdata_iterator_->next();
+            if (rdata_iterator_->isLast()) {
+                // all used up, next.
+                ++dom_iterator_;
+                // New RRset, so get a new rdata iterator, but only if this
+                // was not the final RRset in the chain
+                if (dom_iterator_ != node_->getData()->end()) {
+                    rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+                }
+            }
+            return (result);
+        } else {
+            // The iterator points to the next yet unused RRset now
+            ConstRRsetPtr result(dom_iterator_->second);
+
+            // This one is used, move it to the next time for next call
+            ++dom_iterator_;
+
+            return (result);
+        }
+    }
+
+    virtual ConstRRsetPtr getSOA() const {
+        isc_throw(NotImplemented, "Not imelemented");
+    }
+};
+
+} // End of anonymous namespace
+
+ZoneIteratorPtr
+InMemoryClient::getIterator(const Name& name, bool separate_rrs) const {
+    ZoneTable::FindResult result(impl_->zone_table.findZone(name));
+    if (result.code != result::SUCCESS) {
+        isc_throw(DataSourceError, "No such zone: " + name.toText());
+    }
+
+    const InMemoryZoneFinder*
+        zone(dynamic_cast<const InMemoryZoneFinder*>(result.zone.get()));
+    if (zone == NULL) {
+        /*
+         * TODO: This can happen only during some of the tests and only as
+         * a temporary solution. This should be fixed by #1159 and then
+         * this cast and check shouldn't be necessary. We don't have
+         * test for handling a "can not happen" condition.
+         */
+        isc_throw(Unexpected, "The zone at " + name.toText() +
+                  " is not InMemoryZoneFinder");
+    }
+    return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name,
+                                               separate_rrs)));
+}
+
+ZoneUpdaterPtr
+InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
+    isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
+}
+
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+InMemoryClient::getJournalReader(const isc::dns::Name&, uint32_t,
+                                 uint32_t) const
+{
+    isc_throw(isc::NotImplemented, "Journaling isn't supported for "
+              "in memory data source");
 }
+
+namespace {
+// convenience function to add an error message to a list of those
+// (TODO: move functions like these to some util lib?)
+void
+addError(ElementPtr errors, const std::string& error) {
+    if (errors != ElementPtr() && errors->getType() == Element::list) {
+        errors->add(Element::create(error));
+    }
+}
+
+/// Check if the given element exists in the map, and if it is a string
+bool
+checkConfigElementString(ConstElementPtr config, const std::string& name,
+                         ElementPtr errors)
+{
+    if (!config->contains(name)) {
+        addError(errors,
+                 "Config for memory backend does not contain a '"
+                 +name+
+                 "' value");
+        return false;
+    } else if (!config->get(name) ||
+               config->get(name)->getType() != Element::string) {
+        addError(errors, "value of " + name +
+                 " in memory backend config is not a string");
+        return false;
+    } else {
+        return true;
+    }
+}
+
+bool
+checkZoneConfig(ConstElementPtr config, ElementPtr errors) {
+    bool result = true;
+    if (!config || config->getType() != Element::map) {
+        addError(errors, "Elements in memory backend's zone list must be maps");
+        result = false;
+    } else {
+        if (!checkConfigElementString(config, "origin", errors)) {
+            result = false;
+        }
+        if (!checkConfigElementString(config, "file", errors)) {
+            result = false;
+        }
+        // we could add some existence/readability/parsability checks here
+        // if we want
+    }
+    return result;
+}
+
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+    /* Specific configuration is under discussion, right now this accepts
+     * the 'old' configuration, see [TODO]
+     * So for memory datasource, we get a structure like this:
+     * { "type": string ("memory"),
+     *   "class": string ("IN"/"CH"/etc),
+     *   "zones": list
+     * }
+     * Zones list is a list of maps:
+     * { "origin": string,
+     *     "file": string
+     * }
+     *
+     * At this moment we cannot be completely sure of the contents of the
+     * structure, so we have to do some more extensive tests than should
+     * strictly be necessary (e.g. existence and type of elements)
+     */
+    bool result = true;
+
+    if (!config || config->getType() != Element::map) {
+        addError(errors, "Base config for memory backend must be a map");
+        result = false;
+    } else {
+        if (!checkConfigElementString(config, "type", errors)) {
+            result = false;
+        } else {
+            if (config->get("type")->stringValue() != "memory") {
+                addError(errors,
+                         "Config for memory backend is not of type \"memory\"");
+                result = false;
+            }
+        }
+        if (!checkConfigElementString(config, "class", errors)) {
+            result = false;
+        } else {
+            try {
+                RRClass rrc(config->get("class")->stringValue());
+            } catch (const isc::Exception& rrce) {
+                addError(errors,
+                         "Error parsing class config for memory backend: " +
+                         std::string(rrce.what()));
+                result = false;
+            }
+        }
+        if (!config->contains("zones")) {
+            addError(errors, "No 'zones' element in memory backend config");
+            result = false;
+        } else if (!config->get("zones") ||
+                   config->get("zones")->getType() != Element::list) {
+            addError(errors, "'zones' element in memory backend config is not a list");
+            result = false;
+        } else {
+            BOOST_FOREACH(ConstElementPtr zone_config,
+                          config->get("zones")->listValue()) {
+                if (!checkZoneConfig(zone_config, errors)) {
+                    result = false;
+                }
+            }
+        }
+    }
+
+    return (result);
+    return true;
+}
+
+} // end anonymous namespace
+
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config, std::string& error) {
+    ElementPtr errors(Element::createList());
+    if (!checkConfig(config, errors)) {
+        error = "Configuration error: " + errors->str();
+        return (NULL);
+    }
+    try {
+        return (new InMemoryClient());
+    } catch (const std::exception& exc) {
+        error = std::string("Error creating memory datasource: ") + exc.what();
+        return (NULL);
+    } catch (...) {
+        error = std::string("Error creating memory datasource, "
+                            "unknown exception");
+        return (NULL);
+    }
+}
+
+void destroyInstance(DataSourceClient* instance) {
+    delete instance;
+}
+
+
 } // end of namespace datasrc
-} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 99bb4e8..b852eb3 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -17,7 +17,12 @@
 
 #include <string>
 
+#include <boost/noncopyable.hpp>
+
 #include <datasrc/zonetable.h>
+#include <datasrc/client.h>
+
+#include <cc/data.h>
 
 namespace isc {
 namespace dns {
@@ -27,18 +32,17 @@ class RRsetList;
 
 namespace datasrc {
 
-/// A derived zone class intended to be used with the memory data source.
-class MemoryZone : public Zone {
+/// A derived zone finder class intended to be used with the memory data source.
+///
+/// Conceptually this "finder" maintains a local in-memory copy of all RRs
+/// of a single zone from some kind of source (right now it's a textual
+/// master file, but it could also be another data source with a database
+/// backend).  This is why the class has methods like \c load() or \c add().
+///
+/// This class is non copyable.
+class InMemoryZoneFinder : boost::noncopyable, public ZoneFinder {
     ///
     /// \name Constructors and Destructor.
-    ///
-    /// \b Note:
-    /// The copy constructor and the assignment operator are intentionally
-    /// defined as private, making this class non copyable.
-    //@{
-private:
-    MemoryZone(const MemoryZone& source);
-    MemoryZone& operator=(const MemoryZone& source);
 public:
     /// \brief Constructor from zone parameters.
     ///
@@ -48,17 +52,18 @@ public:
     ///
     /// \param rrclass The RR class of the zone.
     /// \param origin The origin name of the zone.
-    MemoryZone(const isc::dns::RRClass& rrclass, const isc::dns::Name& origin);
+    InMemoryZoneFinder(const isc::dns::RRClass& rrclass,
+                       const isc::dns::Name& origin);
 
     /// The destructor.
-    virtual ~MemoryZone();
+    virtual ~InMemoryZoneFinder();
     //@}
 
     /// \brief Returns the origin of the zone.
-    virtual const isc::dns::Name& getOrigin() const;
+    virtual isc::dns::Name getOrigin() const;
 
     /// \brief Returns the class of the zone.
-    virtual const isc::dns::RRClass& getClass() const;
+    virtual isc::dns::RRClass getClass() const;
 
     /// \brief Looks up an RRset in the zone.
     ///
@@ -70,7 +75,13 @@ public:
     virtual FindResult find(const isc::dns::Name& name,
                             const isc::dns::RRType& type,
                             isc::dns::RRsetList* target = NULL,
-                            const FindOptions options = FIND_DEFAULT) const;
+                            const FindOptions options = FIND_DEFAULT);
+
+    /// \brief Implementation of the ZoneFinder::findPreviousName method
+    ///
+    /// This one throws NotImplemented exception, as InMemory doesn't
+    /// support DNSSEC currently.
+    virtual isc::dns::Name findPreviousName(const isc::dns::Name& query) const;
 
     /// \brief Inserts an rrset into the zone.
     ///
@@ -128,14 +139,14 @@ public:
     /// Return the master file name of the zone
     ///
     /// This method returns the name of the zone's master file to be loaded.
-    /// The returned string will be an empty unless the zone has successfully
-    /// loaded a zone.
+    /// The returned string will be empty unless the zone finder has
+    /// successfully loaded a zone.
     ///
     /// This method should normally not throw an exception.  But the creation
     /// of the return string may involve a resource allocation, and if it
     /// fails, the corresponding standard exception will be thrown.
     ///
-    /// \return The name of the zone file loaded in the zone, or an empty
+    /// \return The name of the zone file loaded in the zone finder, or an empty
     /// string if the zone hasn't loaded any file.
     const std::string getFileName() const;
 
@@ -164,144 +175,160 @@ public:
     ///     configuration reloading is written.
     void load(const std::string& filename);
 
-    /// Exchanges the content of \c this zone with that of the given \c zone.
+    /// Exchanges the content of \c this zone finder with that of the given
+    /// \c zone_finder.
     ///
     /// This method never throws an exception.
     ///
-    /// \param zone Another \c MemoryZone object which is to be swapped with
-    /// \c this zone.
-    void swap(MemoryZone& zone);
+    /// \param zone_finder Another \c InMemoryZoneFinder object which is to
+    /// be swapped with \c this zone finder.
+    void swap(InMemoryZoneFinder& zone_finder);
 
 private:
     /// \name Hidden private data
     //@{
-    struct MemoryZoneImpl;
-    MemoryZoneImpl* impl_;
+    struct InMemoryZoneFinderImpl;
+    InMemoryZoneFinderImpl* impl_;
     //@}
+    // The friend here is for InMemoryClient::getIterator. The iterator
+    // needs to access the data inside the zone, so the InMemoryClient
+    // extracts the pointer to data and puts it into the iterator.
+    // The access is read only.
+    friend class InMemoryClient;
 };
 
-/// \brief A data source that uses in memory dedicated backend.
+/// \brief A data source client that holds all necessary data in memory.
 ///
-/// The \c MemoryDataSrc class represents a data source and provides a
-/// basic interface to help DNS lookup processing. For a given domain
-/// name, its \c findZone() method searches the in memory dedicated backend
-/// for the zone that gives a longest match against that name.
+/// The \c InMemoryClient class provides an access to a conceptual data
+/// source that maintains all necessary data in a memory image, thereby
+/// allowing much faster lookups.  The in memory data is a copy of some
+/// real physical source - in the current implementation a list of zones
+/// are populated as a result of \c addZone() calls; zone data is given
+/// in a standard master file (but there's a plan to use database backends
+/// as a source of the in memory data).
 ///
-/// The in memory dedicated backend are assumed to be of the same RR class,
-/// but the \c MemoryDataSrc class does not enforce the assumption through
+/// Although every data source client is assumed to be of the same RR class,
+/// the \c InMemoryClient class does not enforce the assumption through
 /// its interface.
 /// For example, the \c addZone() method does not check if the new zone is of
-/// the same RR class as that of the others already in the dedicated backend.
+/// the same RR class as that of the others already in memory.
 /// It is caller's responsibility to ensure this assumption.
 ///
 /// <b>Notes to developer:</b>
 ///
-/// For now, we don't make it a derived class of AbstractDataSrc because the
-/// interface is so different (we'll eventually consider this as part of the
-/// generalization work).
-///
 /// The addZone() method takes a (Boost) shared pointer because it would be
 /// inconvenient to require the caller to maintain the ownership of zones,
 /// while it wouldn't be safe to delete unnecessary zones inside the dedicated
 /// backend.
 ///
-/// The findZone() method takes a domain name and returns the best matching \c
-/// MemoryZone in the form of (Boost) shared pointer, so that it can provide
-/// the general interface for all data sources.
-class MemoryDataSrc {
+/// The findZone() method takes a domain name and returns the best matching
+/// \c InMemoryZoneFinder in the form of (Boost) shared pointer, so that it can
+/// provide the general interface for all data sources.
+class InMemoryClient : public DataSourceClient {
 public:
-    /// \brief A helper structure to represent the search result of
-    /// <code>MemoryDataSrc::find()</code>.
-    ///
-    /// This is a straightforward pair of the result code and a share pointer
-    /// to the found zone to represent the result of \c find().
-    /// We use this in order to avoid overloading the return value for both
-    /// the result code ("success" or "not found") and the found object,
-    /// i.e., avoid using \c NULL to mean "not found", etc.
-    ///
-    /// This is a simple value class with no internal state, so for
-    /// convenience we allow the applications to refer to the members
-    /// directly.
-    ///
-    /// See the description of \c find() for the semantics of the member
-    /// variables.
-    struct FindResult {
-        FindResult(result::Result param_code, const ZonePtr param_zone) :
-            code(param_code), zone(param_zone)
-        {}
-        const result::Result code;
-        const ZonePtr zone;
-    };
-
     ///
     /// \name Constructors and Destructor.
     ///
-    /// \b Note:
-    /// The copy constructor and the assignment operator are intentionally
-    /// defined as private, making this class non copyable.
     //@{
-private:
-    MemoryDataSrc(const MemoryDataSrc& source);
-    MemoryDataSrc& operator=(const MemoryDataSrc& source);
 
-public:
     /// Default constructor.
     ///
     /// This constructor internally involves resource allocation, and if
     /// it fails, a corresponding standard exception will be thrown.
     /// It never throws an exception otherwise.
-    MemoryDataSrc();
+    InMemoryClient();
 
     /// The destructor.
-    ~MemoryDataSrc();
+    ~InMemoryClient();
     //@}
 
-    /// Return the number of zones stored in the data source.
+    /// Return the number of zones stored in the client.
     ///
     /// This method never throws an exception.
     ///
-    /// \return The number of zones stored in the data source.
+    /// \return The number of zones stored in the client.
     unsigned int getZoneCount() const;
 
-    /// Add a \c Zone to the \c MemoryDataSrc.
+    /// Add a zone (in the form of \c ZoneFinder) to the \c InMemoryClient.
     ///
-    /// \c Zone must not be associated with a NULL pointer; otherwise
+    /// \c zone_finder must not be associated with a NULL pointer; otherwise
     /// an exception of class \c InvalidParameter will be thrown.
     /// If internal resource allocation fails, a corresponding standard
     /// exception will be thrown.
     /// This method never throws an exception otherwise.
     ///
-    /// \param zone A \c Zone object to be added.
-    /// \return \c result::SUCCESS If the zone is successfully
-    /// added to the memory data source.
+    /// \param zone_finder A \c ZoneFinder object to be added.
+    /// \return \c result::SUCCESS If the zone_finder is successfully
+    /// added to the client.
     /// \return \c result::EXIST The memory data source already
     /// stores a zone that has the same origin.
-    result::Result addZone(ZonePtr zone);
-
-    /// Find a \c Zone that best matches the given name in the \c MemoryDataSrc.
-    ///
-    /// It searches the internal storage for a \c Zone that gives the
-    /// longest match against \c name, and returns the result in the
-    /// form of a \c FindResult object as follows:
-    /// - \c code: The result code of the operation.
-    ///   - \c result::SUCCESS: A zone that gives an exact match
-    //    is found
-    ///   - \c result::PARTIALMATCH: A zone whose origin is a
-    //    super domain of \c name is found (but there is no exact match)
-    ///   - \c result::NOTFOUND: For all other cases.
-    /// - \c zone: A "Boost" shared pointer to the found \c Zone object if one
-    //  is found; otherwise \c NULL.
-    ///
-    /// This method never throws an exception.
+    result::Result addZone(ZoneFinderPtr zone_finder);
+
+    /// Returns a \c ZoneFinder for a zone that best matches the given
+    /// name.
     ///
-    /// \param name A domain name for which the search is performed.
-    /// \return A \c FindResult object enclosing the search result (see above).
-    FindResult findZone(const isc::dns::Name& name) const;
+    /// This derived version of the method never throws an exception.
+    /// For other details see \c DataSourceClient::findZone().
+    virtual FindResult findZone(const isc::dns::Name& name) const;
+
+    /// \brief Implementation of the getIterator method
+    virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
+                                        bool separate_rrs = false) const;
+
+    /// In-memory data source is read-only, so this derived method will
+    /// result in a NotImplemented exception.
+    ///
+    /// \note We plan to use a database-based data source as a backend
+    /// persistent storage for an in-memory data source.  When it's
+    /// implemented we may also want to allow the user of the in-memory client
+    /// to update via its updater (this may or may not be a good idea and
+    /// is subject to further discussions).
+    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
+                                      bool replace, bool journaling = false)
+        const;
+
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
 
 private:
-    class MemoryDataSrcImpl;
-    MemoryDataSrcImpl* impl_;
+    // TODO: Do we still need the PImpl if nobody should manipulate this class
+    // directly any more (it should be handled through DataSourceClient)?
+    class InMemoryClientImpl;
+    InMemoryClientImpl* impl_;
 };
+
+/// \brief Creates an instance of the Memory datasource client
+///
+/// Currently the configuration passed here must be a MapElement, formed as
+/// follows:
+/// \code
+/// { "type": string ("memory"),
+///   "class": string ("IN"/"CH"/etc),
+///   "zones": list
+/// }
+/// Zones list is a list of maps:
+/// { "origin": string,
+///   "file": string
+/// }
+/// \endcode
+/// (i.e. the configuration that was used prior to the datasource refactor)
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+///              during initialization
+/// \return An instance of the memory datasource client, or NULL if there was
+///         an error
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+                                            std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+
 }
 }
 #endif  // __DATA_SOURCE_MEMORY_H
diff --git a/src/lib/datasrc/rbtree.h b/src/lib/datasrc/rbtree.h
index 03a6967..b6c098a 100644
--- a/src/lib/datasrc/rbtree.h
+++ b/src/lib/datasrc/rbtree.h
@@ -209,7 +209,7 @@ public:
     /// \exception isc::InvalidParameter Unsettable flag is specified
     /// \exception None otherwise
     /// \param flag The node flag to be changed.
-    /// \on If \c true, set the flag to on; otherwise set it to off.
+    /// \param on If \c true, set the flag to on; otherwise set it to off.
     void setFlag(Flags flag, bool on = true) {
         if ((flag & ~SETTABLE_FLAGS) != 0) {
             isc_throw(isc::InvalidParameter,
@@ -226,7 +226,8 @@ public:
 private:
     /// \name Callback related methods
     ///
-    /// See the description of \c RBTree<T>::find() about callbacks.
+    /// See the description of \c RBTree<T>::find() at \ref callback
+    /// about callbacks.
     ///
     /// These methods never throw an exception.
     //@{
@@ -702,11 +703,12 @@ public:
     }
 
     /// \brief Find with callback and node chain.
+    /// \anchor callback
     ///
     /// This version of \c find() is specifically designed for the backend
-    /// of the \c MemoryZone class, and implements all necessary features
-    /// for that purpose.  Other applications shouldn't need these additional
-    /// features, and should normally use the simpler versions.
+    /// of the \c InMemoryZoneFinder class, and implements all necessary
+    /// features for that purpose.  Other applications shouldn't need these
+    /// additional features, and should normally use the simpler versions.
     ///
     /// This version of \c find() calls the callback whenever traversing (on
     /// the way from root down the tree) a marked node on the way down through
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
new file mode 100644
index 0000000..fb2ffef
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -0,0 +1,1170 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <sqlite3.h>
+
+#include <string>
+#include <vector>
+
+#include <boost/foreach.hpp>
+
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/logger.h>
+#include <datasrc/data_source.h>
+#include <datasrc/factory.h>
+#include <datasrc/database.h>
+#include <util/filename.h>
+
+using namespace std;
+using namespace isc::data;
+
+#define SQLITE_SCHEMA_VERSION 1
+
+#define CONFIG_ITEM_DATABASE_FILE "database_file"
+
+namespace isc {
+namespace datasrc {
+
+// The following enum and char* array define the SQL statements commonly
+// used in this implementation.  Corresponding prepared statements (of
+// type sqlite3_stmt*) are maintained in the statements_ array of the
+// SQLite3Parameters structure.
+
+enum StatementID {
+    ZONE = 0,
+    ANY = 1,
+    ANY_SUB = 2,
+    BEGIN = 3,
+    COMMIT = 4,
+    ROLLBACK = 5,
+    DEL_ZONE_RECORDS = 6,
+    ADD_RECORD = 7,
+    DEL_RECORD = 8,
+    ITERATE = 9,
+    FIND_PREVIOUS = 10,
+    ADD_RECORD_DIFF = 11,
+    GET_RECORD_DIFF = 12,       // This is temporary for testing "add diff"
+    LOW_DIFF_ID = 13,
+    HIGH_DIFF_ID = 14,
+    DIFF_RECS = 15,
+    NUM_STATEMENTS = 16
+};
+
+const char* const text_statements[NUM_STATEMENTS] = {
+    // note for ANY and ITERATE: the order of the SELECT values is
+    // specifically chosen to match the enum values in RecordColumns
+    "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
+    "SELECT rdtype, ttl, sigtype, rdata FROM records "     // ANY
+        "WHERE zone_id=?1 AND name=?2",
+    "SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
+        "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+    "BEGIN",                    // BEGIN
+    "COMMIT",                   // COMMIT
+    "ROLLBACK",                 // ROLLBACK
+    "DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
+    "INSERT INTO records "      // ADD_RECORD
+        "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+        "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+    "DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
+        "AND rdtype=?3 AND rdata=?4",
+    "SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
+        "WHERE zone_id = ?1 ORDER BY rname, rdtype",
+    /*
+     * This one looks for previous name with NSEC record. It is done by
+     * using the reversed name. The NSEC is checked because we need to
+     * skip glue data, which don't have the NSEC.
+     */
+    "SELECT name FROM records " // FIND_PREVIOUS
+        "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+        "rname < $2 ORDER BY rname DESC LIMIT 1",
+    "INSERT INTO diffs "        // ADD_RECORD_DIFF
+        "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+        "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+    "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+        "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation",
+
+    // Two statements to select the lowest ID and highest ID in a set of
+    // differences.
+    "SELECT id FROM diffs "     // LOW_DIFF_ID
+        "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+        "ORDER BY id ASC LIMIT 1",
+    "SELECT id FROM diffs "     // HIGH_DIFF_ID
+        "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+        "ORDER BY id DESC LIMIT 1",
+
+    // In the next statement, note the redundant ID.  This is to ensure
+    // that the columns match the column IDs passed to the iterator
+    "SELECT rrtype, ttl, id, rdata, name FROM diffs "   // DIFF_RECS
+        "WHERE zone_id=?1 AND id>=?2 and id<=?3 "
+        "ORDER BY id ASC"
+};
+
+struct SQLite3Parameters {
+    SQLite3Parameters() :
+        db_(NULL), version_(-1), in_transaction(false), updating_zone(false),
+        updated_zone_id(-1)
+    {
+        for (int i = 0; i < NUM_STATEMENTS; ++i) {
+            statements_[i] = NULL;
+        }
+    }
+
+    // This method returns the specified ID of SQLITE3 statement.  If it's
+    // not yet prepared it internally creates a new one.  This way we can
+    // avoid preparing unnecessary statements and minimize the overhead.
+    sqlite3_stmt*
+    getStatement(int id) {
+        assert(id < NUM_STATEMENTS);
+        if (statements_[id] == NULL) {
+            assert(db_ != NULL);
+            sqlite3_stmt* prepared = NULL;
+            if (sqlite3_prepare_v2(db_, text_statements[id], -1, &prepared,
+                                   NULL) != SQLITE_OK) {
+                isc_throw(SQLite3Error, "Could not prepare SQLite statement: "
+                          << text_statements[id] <<
+                          ": " << sqlite3_errmsg(db_));
+            }
+            statements_[id] = prepared;
+        }
+        return (statements_[id]);
+    }
+
+    void
+    finalizeStatements() {
+        for (int i = 0; i < NUM_STATEMENTS; ++i) {
+            if (statements_[i] != NULL) {
+                sqlite3_finalize(statements_[i]);
+                statements_[i] = NULL;
+            }
+        }
+    }
+
+    sqlite3* db_;
+    int version_;
+    bool in_transaction; // whether or not a transaction has been started
+    bool updating_zone;          // whether or not updating the zone
+    int updated_zone_id;        // valid only when in_transaction is true
+private:
+    // statements_ are private and must be accessed via getStatement() outside
+    // of this structure.
+    sqlite3_stmt* statements_[NUM_STATEMENTS];
+};
+
+// This is a helper class to encapsulate the code logic of executing
+// a specific SQLite3 statement, ensuring the corresponding prepared
+// statement is always reset whether the execution is completed successfully
+// or it results in an exception.
+// Note that an object of this class is intended to be used for "ephemeral"
+// statement, which is completed with a single "step" (normally within a
+// single call to an SQLite3Database method).  In particular, it cannot be
+// used for "SELECT" variants, which generally expect multiple matching rows.
+class StatementProcessor {
+public:
+    // desc will be used on failure in the what() message of the resulting
+    // DataSourceError exception.
+    StatementProcessor(SQLite3Parameters& dbparameters, StatementID stmt_id,
+                       const char* desc) :
+        dbparameters_(dbparameters), stmt_(dbparameters.getStatement(stmt_id)),
+        desc_(desc)
+    {
+        sqlite3_clear_bindings(stmt_);
+    }
+
+    ~StatementProcessor() {
+        sqlite3_reset(stmt_);
+    }
+
+    void exec() {
+        if (sqlite3_step(stmt_) != SQLITE_DONE) {
+            sqlite3_reset(stmt_);
+            isc_throw(DataSourceError, "failed to " << desc_ << ": " <<
+                      sqlite3_errmsg(dbparameters_.db_));
+        }
+    }
+
+private:
+    SQLite3Parameters& dbparameters_;
+    sqlite3_stmt* stmt_;
+    const char* const desc_;
+};
+
+SQLite3Accessor::SQLite3Accessor(const std::string& filename,
+                                 const string& rrclass) :
+    dbparameters_(new SQLite3Parameters),
+    filename_(filename),
+    class_(rrclass),
+    database_name_("sqlite3_" +
+                   isc::util::Filename(filename).nameAndExtension())
+{
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_NEWCONN);
+
+    open(filename);
+}
+
+boost::shared_ptr<DatabaseAccessor>
+SQLite3Accessor::clone() {
+    return (boost::shared_ptr<DatabaseAccessor>(new SQLite3Accessor(filename_,
+                                                                    class_)));
+}
+
+namespace {
+
+// This is a helper class to initialize a Sqlite3 DB safely.  An object of
+// this class encapsulates all temporary resources that are necessary for
+// the initialization, and release them in the destructor.  Once everything
+// is properly initialized, the move() method moves the allocated resources
+// to the main object in an exception free manner.  This way, the main code
+// for the initialization can be exception safe, and can provide the strong
+// exception guarantee.
+class Initializer {
+public:
+    ~Initializer() {
+        if (params_.db_ != NULL) {
+            sqlite3_close(params_.db_);
+        }
+    }
+    void move(SQLite3Parameters* dst) {
+        *dst = params_;
+        params_ = SQLite3Parameters(); // clear everything
+    }
+    SQLite3Parameters params_;
+};
+
+const char* const SCHEMA_LIST[] = {
+    "CREATE TABLE schema_version (version INTEGER NOT NULL)",
+    "INSERT INTO schema_version VALUES (1)",
+    "CREATE TABLE zones (id INTEGER PRIMARY KEY, "
+    "name STRING NOT NULL COLLATE NOCASE, "
+    "rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN', "
+    "dnssec BOOLEAN NOT NULL DEFAULT 0)",
+    "CREATE INDEX zones_byname ON zones (name)",
+    "CREATE TABLE records (id INTEGER PRIMARY KEY, "
+        "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+        "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+        "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+        "rdata STRING NOT NULL)",
+    "CREATE INDEX records_byname ON records (name)",
+    "CREATE INDEX records_byrname ON records (rname)",
+    "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
+        "hash STRING NOT NULL COLLATE NOCASE, "
+        "owner STRING NOT NULL COLLATE NOCASE, "
+        "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+        "rdata STRING NOT NULL)",
+    "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
+    "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
+        "zone_id INTEGER NOT NULL, "
+        "version INTEGER NOT NULL, "
+        "operation INTEGER NOT NULL, "
+        "name STRING NOT NULL COLLATE NOCASE, "
+        "rrtype STRING NOT NULL COLLATE NOCASE, "
+        "ttl INTEGER NOT NULL, "
+        "rdata STRING NOT NULL)",
+    NULL
+};
+
+sqlite3_stmt*
+prepare(sqlite3* const db, const char* const statement) {
+    sqlite3_stmt* prepared = NULL;
+    if (sqlite3_prepare_v2(db, statement, -1, &prepared, NULL) != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not prepare SQLite statement: " <<
+                  statement << ": " << sqlite3_errmsg(db));
+    }
+    return (prepared);
+}
+
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void doSleep() {
+    struct timespec req;
+    req.tv_sec = 0;
+    req.tv_nsec = 100000000;
+    nanosleep(&req, NULL);
+}
+
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int checkSchemaVersion(sqlite3* db) {
+    sqlite3_stmt* prepared = NULL;
+    // At this point in time, the database might be exclusively locked, in
+    // which case even prepare() will return BUSY, so we may need to try a
+    // few times
+    for (size_t i = 0; i < 50; ++i) {
+        int rc = sqlite3_prepare_v2(db, "SELECT version FROM schema_version",
+                                    -1, &prepared, NULL);
+        if (rc == SQLITE_ERROR) {
+            // this is the error that is returned when the table does not
+            // exist
+            return (-1);
+        } else if (rc == SQLITE_OK) {
+            break;
+        } else if (rc != SQLITE_BUSY || i == 50) {
+            isc_throw(SQLite3Error, "Unable to prepare version query: "
+                        << rc << " " << sqlite3_errmsg(db));
+        }
+        doSleep();
+    }
+    if (sqlite3_step(prepared) != SQLITE_ROW) {
+        isc_throw(SQLite3Error,
+                    "Unable to query version: " << sqlite3_errmsg(db));
+    }
+    int version = sqlite3_column_int(prepared, 0);
+    sqlite3_finalize(prepared);
+    return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+    // try to get an exclusive lock. Once that is obtained, do the version
+    // check *again*, just in case this process was racing another
+    //
+    // try for 5 secs (50*0.1)
+    int rc;
+    logger.info(DATASRC_SQLITE_SETUP);
+    for (size_t i = 0; i < 50; ++i) {
+        rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+                            NULL);
+        if (rc == SQLITE_OK) {
+            break;
+        } else if (rc != SQLITE_BUSY || i == 50) {
+            isc_throw(SQLite3Error, "Unable to acquire exclusive lock "
+                        "for database creation: " << sqlite3_errmsg(db));
+        }
+        doSleep();
+    }
+    int schema_version = checkSchemaVersion(db);
+    if (schema_version == -1) {
+        for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
+            if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
+                SQLITE_OK) {
+                isc_throw(SQLite3Error,
+                        "Failed to set up schema " << SCHEMA_LIST[i]);
+            }
+        }
+        sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+        return (SQLITE_SCHEMA_VERSION);
+    } else {
+        return (schema_version);
+    }
+}
+
+void
+checkAndSetupSchema(Initializer* initializer) {
+    sqlite3* const db = initializer->params_.db_;
+
+    int schema_version = checkSchemaVersion(db);
+    if (schema_version != SQLITE_SCHEMA_VERSION) {
+        schema_version = create_database(db);
+    }
+    initializer->params_.version_ = schema_version;
+}
+
+}
+
+void
+SQLite3Accessor::open(const std::string& name) {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNOPEN).arg(name);
+    if (dbparameters_->db_ != NULL) {
+        // There shouldn't be a way to trigger this anyway
+        isc_throw(DataSourceError, "Duplicate SQLite open with " << name);
+    }
+
+    Initializer initializer;
+
+    if (sqlite3_open(name.c_str(), &initializer.params_.db_) != 0) {
+        isc_throw(SQLite3Error, "Cannot open SQLite database file: " << name);
+    }
+
+    checkAndSetupSchema(&initializer);
+    initializer.move(dbparameters_.get());
+}
+
+SQLite3Accessor::~SQLite3Accessor() {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_DROPCONN);
+    if (dbparameters_->db_ != NULL) {
+        close();
+    }
+}
+
+void
+SQLite3Accessor::close(void) {
+    LOG_DEBUG(logger, DBG_TRACE_BASIC, DATASRC_SQLITE_CONNCLOSE);
+    if (dbparameters_->db_ == NULL) {
+        isc_throw(DataSourceError,
+                  "SQLite data source is being closed before open");
+    }
+
+    dbparameters_->finalizeStatements();
+    sqlite3_close(dbparameters_->db_);
+    dbparameters_->db_ = NULL;
+}
+
+std::pair<bool, int>
+SQLite3Accessor::getZone(const std::string& name) const {
+    int rc;
+    sqlite3_stmt* const stmt = dbparameters_->getStatement(ZONE);
+
+    // Take the statement (simple SELECT id FROM zones WHERE...)
+    // and prepare it (bind the parameters to it)
+    sqlite3_reset(stmt);
+    rc = sqlite3_bind_text(stmt, 1, name.c_str(), -1, SQLITE_STATIC);
+    if (rc != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind " << name <<
+                  " to SQL statement (zone)");
+    }
+    rc = sqlite3_bind_text(stmt, 2, class_.c_str(), -1, SQLITE_STATIC);
+    if (rc != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind " << class_ <<
+                  " to SQL statement (zone)");
+    }
+
+    // Get the data there and see if it found anything
+    rc = sqlite3_step(stmt);
+    if (rc == SQLITE_ROW) {
+        const int zone_id = sqlite3_column_int(stmt, 0);
+        sqlite3_reset(stmt);
+        return (pair<bool, int>(true, zone_id));
+    } else if (rc == SQLITE_DONE) {
+        // Free resources
+        sqlite3_reset(stmt);
+        return (pair<bool, int>(false, 0));
+    }
+
+    sqlite3_reset(stmt);
+    isc_throw(DataSourceError, "Unexpected failure in sqlite3_step: " <<
+              sqlite3_errmsg(dbparameters_->db_));
+    // Compilers might not realize isc_throw always throws
+    return (std::pair<bool, int>(false, 0));
+}
+
+namespace {
+
+// Conversion to plain char
+const char*
+convertToPlainChar(const unsigned char* ucp, sqlite3 *db) {
+    if (ucp == NULL) {
+        // The field can really be NULL, in which case we return an
+        // empty string, or sqlite may have run out of memory, in
+        // which case we raise an error
+        if (sqlite3_errcode(db) == SQLITE_NOMEM) {
+            isc_throw(DataSourceError,
+                      "Sqlite3 backend encountered a memory allocation "
+                      "error in sqlite3_column_text()");
+        } else {
+            return ("");
+        }
+    }
+    const void* p = ucp;
+    return (static_cast<const char*>(p));
+}
+
+}
+// Iterator context reading resource records out of SQLite3.  The prepared
+// statement is owned by this object: it is created in the constructor and
+// finalized either when the result set is exhausted or in the destructor,
+// whichever comes first.
+class SQLite3Accessor::Context : public DatabaseAccessor::IteratorContext {
+public:
+    // Construct an iterator for all records. When constructed this
+    // way, the getNext() call will copy all fields
+    Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id) :
+        iterator_type_(ITT_ALL),
+        accessor_(accessor),
+        statement_(NULL),
+        name_("")
+    {
+        // We create the statement now and then just keep getting data from it
+        statement_ = prepare(accessor->dbparameters_->db_,
+                             text_statements[ITERATE]);
+        bindZoneId(id);
+    }
+
+    // Construct an iterator for records with a specific name. When constructed
+    // this way, the getNext() call will copy all fields except name
+    Context(const boost::shared_ptr<const SQLite3Accessor>& accessor, int id,
+            const std::string& name, bool subdomains) :
+        iterator_type_(ITT_NAME),
+        accessor_(accessor),
+        statement_(NULL),
+        name_(name)
+    {
+        // We create the statement now and then just keep getting data from it
+        statement_ = prepare(accessor->dbparameters_->db_,
+                             subdomains ? text_statements[ANY_SUB] :
+                             text_statements[ANY]);
+        bindZoneId(id);
+        bindName(name_);
+    }
+
+    // Fill 'data' with the columns of the next row; return false once the
+    // result set is exhausted (the statement is finalized at that point).
+    bool getNext(std::string (&data)[COLUMN_COUNT]) {
+        // If there's another row, get it
+        // If finalize has been called (e.g. when previous getNext() got
+        // SQLITE_DONE), directly return false
+        if (statement_ == NULL) {
+            return false;
+        }
+        const int rc(sqlite3_step(statement_));
+        if (rc == SQLITE_ROW) {
+            // For both types, we copy the first four columns
+            copyColumn(data, TYPE_COLUMN);
+            copyColumn(data, TTL_COLUMN);
+            copyColumn(data, SIGTYPE_COLUMN);
+            copyColumn(data, RDATA_COLUMN);
+            // Only copy Name if we are iterating over every record
+            if (iterator_type_ == ITT_ALL) {
+                copyColumn(data, NAME_COLUMN);
+            }
+            return (true);
+        } else if (rc != SQLITE_DONE) {
+            isc_throw(DataSourceError,
+                      "Unexpected failure in sqlite3_step: " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+        finalize();
+        return (false);
+    }
+
+    virtual ~Context() {
+        finalize();
+    }
+
+private:
+    // Depending on which constructor is called, behaviour is slightly
+    // different. We keep track of what to do with the iterator type
+    // See description of getNext() and the constructors
+    enum IteratorType {
+        ITT_ALL,
+        ITT_NAME
+    };
+
+    // Copy one text column of the current row into the output array.
+    void copyColumn(std::string (&data)[COLUMN_COUNT], int column) {
+        data[column] = convertToPlainChar(sqlite3_column_text(statement_,
+                                                              column),
+                                          accessor_->dbparameters_->db_);
+    }
+
+    // Bind the zone ID as parameter 1.  On failure the statement is
+    // finalized here so the destructor does not operate on it again.
+    void bindZoneId(const int zone_id) {
+        if (sqlite3_bind_int(statement_, 1, zone_id) != SQLITE_OK) {
+            finalize();
+            isc_throw(SQLite3Error, "Could not bind int " << zone_id <<
+                      " to SQL statement: " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+    }
+
+    // Bind the record name as parameter 2.  SQLITE_TRANSIENT makes sqlite
+    // take its own copy of the string.
+    void bindName(const std::string& name) {
+        if (sqlite3_bind_text(statement_, 2, name.c_str(), -1,
+                              SQLITE_TRANSIENT) != SQLITE_OK) {
+            // Fetch the error message before finalize(), which may
+            // invalidate it.
+            const char* errmsg = sqlite3_errmsg(accessor_->dbparameters_->db_);
+            finalize();
+            isc_throw(SQLite3Error, "Could not bind text '" << name <<
+                      "' to SQL statement: " << errmsg);
+        }
+    }
+
+    // Safe to call multiple times: finalizing a NULL statement is a no-op.
+    void finalize() {
+        sqlite3_finalize(statement_);
+        statement_ = NULL;
+    }
+
+    const IteratorType iterator_type_;
+    boost::shared_ptr<const SQLite3Accessor> accessor_;
+    sqlite3_stmt* statement_;
+    const std::string name_;
+};
+
+
+// Methods to retrieve the various iterators
+
+// Create an iterator over the records of a single name in zone 'id' (or,
+// with subdomains=true, everything below that name).
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getRecords(const std::string& name, int id,
+                            bool subdomains) const
+{
+    return (IteratorContextPtr(new Context(shared_from_this(), id, name,
+                                           subdomains)));
+}
+
+// Create an iterator over every record of zone 'id'.
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getAllRecords(int id) const {
+    return (IteratorContextPtr(new Context(shared_from_this(), id)));
+}
+
+
+/// \brief Difference Iterator
+///
+/// This iterator is used to search through the differences table for the
+/// resource records making up an IXFR between two versions of a zone.
+
+class SQLite3Accessor::DiffContext : public DatabaseAccessor::IteratorContext {
+public:
+
+    /// \brief Constructor
+    ///
+    /// Constructs the iterator for the difference sequence.  It is
+    /// passed two parameters, the first and last versions in the difference
+    /// sequence.  Note that because of serial number rollover, it may well
+    /// be that the start serial number is greater than the end one.
+    ///
+    /// \param accessor Accessor providing the database connection and
+    ///        prepared statements
+    /// \param zone_id ID of the zone (in the zone table)
+    /// \param start Serial number of first version in difference sequence
+    /// \param end Serial number of last version in difference sequence
+    ///
+    /// \exception any A number of exceptions can be expected
+    DiffContext(const boost::shared_ptr<const SQLite3Accessor>& accessor,
+                int zone_id, uint32_t start, uint32_t end) :
+        accessor_(accessor),
+        last_status_(SQLITE_ROW)
+    {
+        try {
+            int low_id = findIndex(LOW_DIFF_ID, zone_id, start, DIFF_DELETE);
+            int high_id = findIndex(HIGH_DIFF_ID, zone_id, end, DIFF_ADD);
+
+            // Prepare the statement that will return data values
+            reset(DIFF_RECS);
+            bindInt(DIFF_RECS, 1, zone_id);
+            bindInt(DIFF_RECS, 2, low_id);
+            bindInt(DIFF_RECS, 3, high_id);
+
+        } catch (...) {
+            // Something wrong, clear up everything.
+            accessor_->dbparameters_->finalizeStatements();
+            throw;
+        }
+    }
+
+    /// \brief Destructor
+    virtual ~DiffContext()
+    {}
+
+    /// \brief Get Next Diff Record
+    ///
+    /// Returns the next difference record in the difference sequence.
+    ///
+    /// \param data Array of std::strings COLUMN_COUNT long.  The results
+    ///        are returned in this.
+    ///
+    /// \return bool true if data is returned, false if not.
+    ///
+    /// \exception any Varied
+    bool getNext(std::string (&data)[COLUMN_COUNT]) {
+
+        if (last_status_ != SQLITE_DONE) {
+            // Last call (if any) didn't reach end of result set, so we
+            // can read another row from it.
+            //
+            // Get a pointer to the statement for brevity (this does not
+            // transfer ownership of the statement to this class, so there is
+            // no need to tidy up after we have finished using it).
+            sqlite3_stmt* stmt =
+                accessor_->dbparameters_->getStatement(DIFF_RECS);
+
+            const int rc(sqlite3_step(stmt));
+            if (rc == SQLITE_ROW) {
+                // Copy the data across to the output array
+                copyColumn(DIFF_RECS, data, TYPE_COLUMN);
+                copyColumn(DIFF_RECS, data, TTL_COLUMN);
+                copyColumn(DIFF_RECS, data, NAME_COLUMN);
+                copyColumn(DIFF_RECS, data, RDATA_COLUMN);
+
+            } else if (rc != SQLITE_DONE) {
+                isc_throw(DataSourceError,
+                          "Unexpected failure in sqlite3_step: " <<
+                          sqlite3_errmsg(accessor_->dbparameters_->db_));
+            }
+            last_status_ = rc;
+        }
+        return (last_status_ == SQLITE_ROW);
+    }
+
+private:
+
+    /// \brief Reset prepared statement
+    ///
+    /// Sets up the statement so that new parameters can be attached to it and
+    /// that it can be used to query for another difference sequence.
+    ///
+    /// \param stindex Index of prepared statement to which to bind
+    void reset(int stindex) {
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+        if ((sqlite3_reset(stmt) != SQLITE_OK) ||
+            (sqlite3_clear_bindings(stmt) != SQLITE_OK)) {
+            isc_throw(SQLite3Error, "Could not clear statement bindings in '" <<
+                      text_statements[stindex] << "': " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+    }
+
+    /// \brief Bind Int
+    ///
+    /// Binds an integer to a specific variable in a prepared statement.
+    ///
+    /// \param stindex Index of prepared statement to which to bind
+    /// \param varindex Index of variable to which to bind
+    /// \param value Value of variable to bind
+    /// \exception SQLite3Error on an error
+    void bindInt(int stindex, int varindex, sqlite3_int64 value) {
+        if (sqlite3_bind_int64(accessor_->dbparameters_->getStatement(stindex),
+                             varindex, value) != SQLITE_OK) {
+            isc_throw(SQLite3Error, "Could not bind value to parameter " <<
+                      varindex << " in statement '" <<
+                      text_statements[stindex] << "': " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+    }
+
+    ///\brief Get Single Value
+    ///
+    /// Executes a prepared statement (which has parameters bound to it)
+    /// for which the result of a single value is expected.
+    ///
+    /// \param stindex Index of prepared statement in statement table.
+    ///
+    /// \return Value of SELECT.
+    ///
+    /// \exception TooMuchData Multiple rows returned when one expected
+    /// \exception TooLittleData Zero rows returned when one expected
+    /// \exception DataSourceError SQLite3-related error
+    int getSingleValue(StatementID stindex) {
+
+        // Get a pointer to the statement for brevity (does not transfer
+        // resources)
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+
+        // Execute the data.  Should be just one result
+        int rc = sqlite3_step(stmt);
+        int result = -1;
+        if (rc == SQLITE_ROW) {
+
+            // Got some data, extract the value
+            result = sqlite3_column_int(stmt, 0);
+            rc = sqlite3_step(stmt);
+            if (rc == SQLITE_DONE) {
+
+                // All OK, exit with the value.
+                return (result);
+
+            } else if (rc == SQLITE_ROW) {
+                isc_throw(TooMuchData, "request to return one value from "
+                          "diffs table returned multiple values");
+            }
+        } else if (rc == SQLITE_DONE) {
+
+            // No data in the table.  A bare exception with no explanation is
+            // thrown, as it will be replaced by a more informative one by
+            // the caller.
+            isc_throw(TooLittleData, "");
+        }
+
+        // We get here on an error.
+        isc_throw(DataSourceError, "could not get data from diffs table: " <<
+                  sqlite3_errmsg(accessor_->dbparameters_->db_));
+
+        // Keep the compiler happy with a return value.
+        return (result);
+    }
+
+    /// \brief Find index
+    ///
+    /// Executes the prepared statement locating the high or low index in
+    /// the diffs table and returns that index.
+    ///
+    /// \param stindex Index of the prepared statement to execute
+    /// \param zone_id ID of the zone for which the index is being sought
+    /// \param serial Zone serial number for which an index is being sought.
+    /// \param diff Diff operation code to match (DIFF_DELETE when looking
+    ///        for the low bound, DIFF_ADD for the high bound)
+    ///
+    /// \return int ID of the row in the diffs table corresponding to the
+    ///         statement.
+    ///
+    /// \exception TooLittleData Internal error, no result returned when one
+    ///            was expected.
+    /// \exception NoSuchSerial Serial number not found.
+    /// \exception NoDiffsData No data for this zone found in diffs table
+    int findIndex(StatementID stindex, int zone_id, uint32_t serial, int diff) {
+
+        // Set up the statement
+        reset(stindex);
+        bindInt(stindex, 1, zone_id);
+        bindInt(stindex, 2, serial);
+        bindInt(stindex, 3, diff);
+
+        // Execute the statement
+        int result = -1;
+        try {
+            result = getSingleValue(stindex);
+
+        } catch (const TooLittleData&) {
+
+            // No data returned but the SQL query succeeded.  Only possibility
+            // is that there is no entry in the differences table for the given
+            // zone and version.
+            isc_throw(NoSuchSerial, "No entry in differences table for " <<
+                      " zone ID " << zone_id << ", serial number " << serial);
+        }
+
+        return (result);
+    }
+
+    /// \brief Copy Column to Output
+    ///
+    /// Copies the textual data in the result set to the specified column
+    /// in the output.
+    ///
+    /// \param stindex Index of prepared statement used to access data
+    /// \param data Array of columns passed to getNext
+    /// \param column Column of output to copy
+    void copyColumn(StatementID stindex, std::string (&data)[COLUMN_COUNT],
+                    int column) {
+
+        // Get a pointer to the statement for brevity (does not transfer
+        // resources)
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+        data[column] = convertToPlainChar(sqlite3_column_text(stmt,
+                                                              column),
+                                          accessor_->dbparameters_->db_);
+    }
+
+    // Attributes
+
+    boost::shared_ptr<const SQLite3Accessor> accessor_; // Accessor object
+    int last_status_;           // Last status received from sqlite3_step
+};
+
+// ... and return the iterator
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getDiffs(int id, uint32_t start, uint32_t end) const {
+    return (IteratorContextPtr(new DiffContext(shared_from_this(), id, start,
+                               end)));
+}
+
+
+
+// Start an update transaction for a single zone.  Returns the same
+// (found, zone id) pair as getZone(); when the zone is not found, no
+// transaction is started.  With replace=true, all existing records of the
+// zone are deleted inside the new transaction.
+pair<bool, int>
+SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
+    if (dbparameters_->updating_zone) {
+        isc_throw(DataSourceError,
+                  "duplicate zone update on SQLite3 data source");
+    }
+    if (dbparameters_->in_transaction) {
+        isc_throw(DataSourceError,
+                  "zone update attempt in another SQLite3 transaction");
+    }
+
+    const pair<bool, int> zone_info(getZone(zone_name));
+    if (!zone_info.first) {
+        return (zone_info);
+    }
+
+    StatementProcessor(*dbparameters_, BEGIN,
+                       "start an SQLite3 update transaction").exec();
+
+    if (replace) {
+        try {
+            StatementProcessor delzone_exec(*dbparameters_, DEL_ZONE_RECORDS,
+                                            "delete zone records");
+
+            sqlite3_stmt* stmt = dbparameters_->getStatement(DEL_ZONE_RECORDS);
+            sqlite3_clear_bindings(stmt);
+            if (sqlite3_bind_int(stmt, 1, zone_info.second) != SQLITE_OK) {
+                isc_throw(DataSourceError,
+                          "failed to bind SQLite3 parameter: " <<
+                          sqlite3_errmsg(dbparameters_->db_));
+            }
+
+            delzone_exec.exec();
+        } catch (const DataSourceError&) {
+            // Once we start a transaction, if something unexpected happens
+            // we need to rollback the transaction so that a subsequent update
+            // is still possible with this accessor.
+            StatementProcessor(*dbparameters_, ROLLBACK,
+                               "rollback an SQLite3 transaction").exec();
+            throw;
+        }
+    }
+
+    // Only mark the update as in progress once everything else succeeded.
+    dbparameters_->in_transaction = true;
+    dbparameters_->updating_zone = true;
+    dbparameters_->updated_zone_id = zone_info.second;
+
+    return (zone_info);
+}
+
+// Start a plain transaction (no zone update); see startUpdateZone() for
+// the zone-modifying variant.  Only one transaction may be active at a time.
+void
+SQLite3Accessor::startTransaction() {
+    if (dbparameters_->in_transaction) {
+        isc_throw(DataSourceError,
+                  "duplicate transaction on SQLite3 data source");
+    }
+
+    StatementProcessor(*dbparameters_, BEGIN,
+                       "start an SQLite3 transaction").exec();
+    dbparameters_->in_transaction = true;
+}
+
+// Commit the active transaction.  Throws DataSourceError when no
+// transaction is in progress or the COMMIT itself fails.
+void
+SQLite3Accessor::commit() {
+    if (!dbparameters_->in_transaction) {
+        isc_throw(DataSourceError, "performing commit on SQLite3 "
+                  "data source without transaction");
+    }
+
+    StatementProcessor(*dbparameters_, COMMIT,
+                       "commit an SQLite3 transaction").exec();
+    dbparameters_->in_transaction = false;
+    dbparameters_->updated_zone_id = -1;
+}
+
+// Roll back the active transaction.  Throws DataSourceError when no
+// transaction is in progress or the ROLLBACK itself fails.
+void
+SQLite3Accessor::rollback() {
+    if (!dbparameters_->in_transaction) {
+        isc_throw(DataSourceError, "performing rollback on SQLite3 "
+                  "data source without transaction");
+    }
+
+    StatementProcessor(*dbparameters_, ROLLBACK,
+                       "rollback an SQLite3 transaction").exec();
+    dbparameters_->in_transaction = false;
+    dbparameters_->updated_zone_id = -1;
+}
+
+namespace {
+// Commonly used code sequence for adding/deleting record.
+//
+// Binds the currently-updated zone ID as parameter 1 of prepared statement
+// 'stmt_id', binds each element of 'update_params' as the following text
+// parameters (empty strings become SQL NULL for compatibility with the old
+// sqlite3 data source API), then executes the statement.  Throws
+// DataSourceError if any bind fails; StatementProcessor resets the
+// statement before and after execution.
+template <typename COLUMNS_TYPE>
+void
+doUpdate(SQLite3Parameters& dbparams, StatementID stmt_id,
+         COLUMNS_TYPE update_params, const char* exec_desc)
+{
+    sqlite3_stmt* const stmt = dbparams.getStatement(stmt_id);
+    StatementProcessor executer(dbparams, stmt_id, exec_desc);
+
+    int param_id = 0;
+    if (sqlite3_bind_int(stmt, ++param_id, dbparams.updated_zone_id)
+        != SQLITE_OK) {
+        isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                  sqlite3_errmsg(dbparams.db_));
+    }
+    const size_t column_count =
+        sizeof(update_params) / sizeof(update_params[0]);
+    // Use size_t for the loop index: column_count is a size_t, and mixing
+    // a signed index with it triggers a signed/unsigned comparison warning.
+    for (size_t i = 0; i < column_count; ++i) {
+        // The old sqlite3 data source API assumes NULL for an empty column.
+        // We need to provide compatibility at least for now.
+        if (sqlite3_bind_text(stmt, ++param_id,
+                              update_params[i].empty() ? NULL :
+                              update_params[i].c_str(),
+                              -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+            isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                      sqlite3_errmsg(dbparams.db_));
+        }
+    }
+    executer.exec();
+}
+}
+
+// Add one record to the zone currently being updated; requires a prior
+// successful startUpdateZone().
+void
+SQLite3Accessor::addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+    if (!dbparameters_->updating_zone) {
+        isc_throw(DataSourceError, "adding record to SQLite3 "
+                  "data source without transaction");
+    }
+    doUpdate<const string (&)[DatabaseAccessor::ADD_COLUMN_COUNT]>(
+        *dbparameters_, ADD_RECORD, columns, "add record to zone");
+}
+
+// Delete one record from the zone currently being updated; requires a prior
+// successful startUpdateZone().
+void
+SQLite3Accessor::deleteRecordInZone(const string (&params)[DEL_PARAM_COUNT]) {
+    if (!dbparameters_->updating_zone) {
+        isc_throw(DataSourceError, "deleting record in SQLite3 "
+                  "data source without transaction");
+    }
+    doUpdate<const string (&)[DatabaseAccessor::DEL_PARAM_COUNT]>(
+        *dbparameters_, DEL_RECORD, params, "delete record from zone");
+}
+
+// Record one diff entry (addition or deletion) for the zone currently being
+// updated.  The zone must be under update and zone_id must match the zone
+// whose update is in progress; otherwise DataSourceError is thrown.
+void
+SQLite3Accessor::addRecordDiff(int zone_id, uint32_t serial,
+                               DiffOperation operation,
+                               const std::string (&params)[DIFF_PARAM_COUNT])
+{
+    if (!dbparameters_->updating_zone) {
+        isc_throw(DataSourceError, "adding record diff without update "
+                  "transaction on " << getDBName());
+    }
+    if (zone_id != dbparameters_->updated_zone_id) {
+        isc_throw(DataSourceError, "bad zone ID for adding record diff on "
+                  << getDBName() << ": " << zone_id << ", must be "
+                  << dbparameters_->updated_zone_id);
+    }
+
+    sqlite3_stmt* const stmt = dbparameters_->getStatement(ADD_RECORD_DIFF);
+    StatementProcessor executer(*dbparameters_, ADD_RECORD_DIFF,
+                                "add record diff");
+    // Bind zone ID, serial and operation code as the first three parameters,
+    // then the textual diff parameters in order.
+    int param_id = 0;
+    if (sqlite3_bind_int(stmt, ++param_id, zone_id)
+        != SQLITE_OK) {
+        isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                  sqlite3_errmsg(dbparameters_->db_));
+    }
+    // bind_int64 because a uint32_t serial does not fit a signed 32-bit int.
+    if (sqlite3_bind_int64(stmt, ++param_id, serial)
+        != SQLITE_OK) {
+        isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                  sqlite3_errmsg(dbparameters_->db_));
+    }
+    if (sqlite3_bind_int(stmt, ++param_id, operation)
+        != SQLITE_OK) {
+        isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                  sqlite3_errmsg(dbparameters_->db_));
+    }
+    for (int i = 0; i < DIFF_PARAM_COUNT; ++i) {
+        if (sqlite3_bind_text(stmt, ++param_id, params[i].c_str(),
+                              -1, SQLITE_TRANSIENT) != SQLITE_OK) {
+            isc_throw(DataSourceError, "failed to bind SQLite3 parameter: " <<
+                      sqlite3_errmsg(dbparameters_->db_));
+        }
+    }
+    executer.exec();
+}
+
+// Short-term test-only helper (see header): return every column of every
+// diffs-table row for the given zone.  Per its documented contract, bind and
+// step failures are deliberately ignored here.
+vector<vector<string> >
+SQLite3Accessor::getRecordDiff(int zone_id) {
+    sqlite3_stmt* const stmt = dbparameters_->getStatement(GET_RECORD_DIFF);
+    sqlite3_bind_int(stmt, 1, zone_id);
+
+    vector<vector<string> > result;
+    while (sqlite3_step(stmt) == SQLITE_ROW) {
+        vector<string> row_result;
+        // 6 columns per row -- presumably the diffs-table column count;
+        // TODO confirm against the GET_RECORD_DIFF statement/schema.
+        for (int i = 0; i < 6; ++i) {
+            row_result.push_back(convertToPlainChar(sqlite3_column_text(stmt,
+                                                                        i),
+                                                    dbparameters_->db_));
+        }
+        result.push_back(row_result);
+    }
+    sqlite3_reset(stmt);
+
+    return (result);
+}
+
+// Return the name preceding 'rname' for the given zone (NOTE(review):
+// 'rname' looks like a reversed-label name judging from the parameter name;
+// confirm against the FIND_PREVIOUS statement).  Throws NotImplemented when
+// no row precedes the query point (no NSEC data or query before the apex).
+std::string
+SQLite3Accessor::findPreviousName(int zone_id, const std::string& rname)
+    const
+{
+    sqlite3_stmt* const stmt = dbparameters_->getStatement(FIND_PREVIOUS);
+    sqlite3_reset(stmt);
+    sqlite3_clear_bindings(stmt);
+
+    if (sqlite3_bind_int(stmt, 1, zone_id) != SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind zone ID " << zone_id <<
+                  " to SQL statement (find previous): " <<
+                  sqlite3_errmsg(dbparameters_->db_));
+    }
+    // SQLITE_STATIC is safe here: rname outlives the statement's use, which
+    // ends at the sqlite3_reset() below.
+    if (sqlite3_bind_text(stmt, 2, rname.c_str(), -1, SQLITE_STATIC) !=
+        SQLITE_OK) {
+        isc_throw(SQLite3Error, "Could not bind name " << rname <<
+                  " to SQL statement (find previous): " <<
+                  sqlite3_errmsg(dbparameters_->db_));
+    }
+
+    std::string result;
+    const int rc = sqlite3_step(stmt);
+    if (rc == SQLITE_ROW) {
+        // We found it
+        result = convertToPlainChar(sqlite3_column_text(stmt, 0),
+                                    dbparameters_->db_);
+    }
+    // Reset before throwing so the statement is reusable afterwards.
+    sqlite3_reset(stmt);
+
+    if (rc == SQLITE_DONE) {
+        // No NSEC records here, this DB doesn't support DNSSEC or
+        // we asked before the apex
+        isc_throw(isc::NotImplemented, "The zone doesn't support DNSSEC or "
+                  "query before apex");
+    }
+
+    if (rc != SQLITE_ROW && rc != SQLITE_DONE) {
+        // Some kind of error
+        isc_throw(SQLite3Error, "Could not get data for previous name");
+    }
+
+    return (result);
+}
+
+namespace {
+// Append 'error' to the 'errors' list when one was supplied; callers may
+// pass a default-constructed (null) ElementPtr to ignore the details.
+void
+addError(ElementPtr errors, const std::string& error) {
+    if (errors != ElementPtr() && errors->getType() == Element::list) {
+        errors->add(Element::create(error));
+    }
+}
+
+// Validate the datasource configuration; returns true when valid.  Every
+// problem found is appended to 'errors' (if non-null) via addError().
+bool
+checkConfig(ConstElementPtr config, ElementPtr errors) {
+    /* Specific configuration is under discussion, right now this accepts
+     * the 'old' configuration, see header file
+     */
+    bool result = true;
+
+    if (!config || config->getType() != Element::map) {
+        addError(errors, "Base config for SQlite3 backend must be a map");
+        result = false;
+    } else {
+        // The map must contain a non-empty string under
+        // CONFIG_ITEM_DATABASE_FILE; each failure mode gets its own message.
+        if (!config->contains(CONFIG_ITEM_DATABASE_FILE)) {
+            addError(errors,
+                     "Config for SQlite3 backend does not contain a '"
+                     CONFIG_ITEM_DATABASE_FILE
+                     "' value");
+            result = false;
+        } else if (!config->get(CONFIG_ITEM_DATABASE_FILE) ||
+                   config->get(CONFIG_ITEM_DATABASE_FILE)->getType() !=
+                   Element::string) {
+            addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+                     " in SQLite3 backend is not a string");
+            result = false;
+        } else if (config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue() ==
+                   "") {
+            addError(errors, "value of " CONFIG_ITEM_DATABASE_FILE
+                     " in SQLite3 backend is empty");
+            result = false;
+        }
+    }
+
+    return (result);
+}
+
+} // end anonymous namespace
+
+// Factory entry point (see header): validate 'config', open the database
+// and return a new DatabaseClient.  On any failure, returns NULL and puts a
+// description in 'error' instead of throwing across the module boundary.
+DataSourceClient *
+createInstance(isc::data::ConstElementPtr config, std::string& error) {
+    ElementPtr errors(Element::createList());
+    if (!checkConfig(config, errors)) {
+        error = "Configuration error: " + errors->str();
+        return (NULL);
+    }
+    std::string dbfile = config->get(CONFIG_ITEM_DATABASE_FILE)->stringValue();
+    try {
+        boost::shared_ptr<DatabaseAccessor> sqlite3_accessor(
+            new SQLite3Accessor(dbfile, "IN")); // XXX: avoid hardcode RR class
+        return (new DatabaseClient(isc::dns::RRClass::IN(), sqlite3_accessor));
+    } catch (const std::exception& exc) {
+        error = std::string("Error creating sqlite3 datasource: ") + exc.what();
+        return (NULL);
+    } catch (...) {
+        error = std::string("Error creating sqlite3 datasource, "
+                            "unknown exception");
+        return (NULL);
+    }
+}
+
+// Counterpart of createInstance(): dispose of an instance it returned.
+void destroyInstance(DataSourceClient* instance) {
+    delete instance;
+}
+
+} // end of namespace datasrc
+} // end of namespace isc
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
new file mode 100644
index 0000000..08be824
--- /dev/null
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -0,0 +1,283 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#ifndef __DATASRC_SQLITE3_ACCESSOR_H
+#define __DATASRC_SQLITE3_ACCESSOR_H
+
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+
+#include <exceptions/exceptions.h>
+
+#include <boost/enable_shared_from_this.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <string>
+
+#include <cc/data.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+}
+
+namespace datasrc {
+
+/**
+ * \brief Low-level database error
+ *
+ * This exception is thrown when the SQLite library complains about something.
+ * It might mean corrupt database file, invalid request or that something is
+ * rotten in the library.
+ */
+class SQLite3Error : public DataSourceError {
+public:
+    // file/line locate the throw site; normally filled in by isc_throw.
+    SQLite3Error(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Much Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * many rows.
+ */
+class TooMuchData : public DataSourceError {
+public:
+    // file/line locate the throw site; normally filled in by isc_throw.
+    TooMuchData(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Little Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * few rows (including none).
+ */
+class TooLittleData : public DataSourceError {
+public:
+    // file/line locate the throw site; normally filled in by isc_throw.
+    TooLittleData(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+struct SQLite3Parameters;
+
+/**
+ * \brief Concrete implementation of DatabaseAccessor for SQLite3 databases
+ *
+ * This opens one database file with our schema and serves data from there.
+ * According to the design, it doesn't interpret the data in any way, it just
+ * provides unified access to the DB.
+ */
+class SQLite3Accessor : public DatabaseAccessor,
+    public boost::enable_shared_from_this<SQLite3Accessor> {
+public:
+    /**
+     * \brief Constructor
+     *
+     * This opens the database and becomes ready to serve data from there.
+     *
+     * \exception SQLite3Error will be thrown if the given database file
+     * doesn't work (it is broken, doesn't exist and can't be created, etc).
+     *
+     * \param filename The database file to be used.
+     * \param rrclass Textual representation of RR class ("IN", "CH", etc),
+     *     specifying which class of data it should serve (while the database
+     *     file can contain multiple classes of data, a single accessor can
+     *     work with only one class).
+     */
+    SQLite3Accessor(const std::string& filename, const std::string& rrclass);
+
+    /**
+     * \brief Destructor
+     *
+     * Closes the database.
+     */
+    ~SQLite3Accessor();
+
+    /// This implementation internally opens a new sqlite3 database for the
+    /// same file name specified in the constructor of the original accessor.
+    virtual boost::shared_ptr<DatabaseAccessor> clone();
+
+    /**
+     * \brief Look up a zone
+     *
+     * This implements the getZone from DatabaseAccessor and looks up a zone
+     * in the data. It looks for a zone with the exact given origin and class
+     * passed to the constructor.
+     *
+     * \exception SQLite3Error if something about the database is broken.
+     *
+     * \param name The (fully qualified) domain name of zone to look up
+     * \return The pair contains if the lookup was successful in the first
+     *     element and the zone id in the second if it was.
+     */
+    virtual std::pair<bool, int> getZone(const std::string& name) const;
+
+    /** \brief Look up all resource records for a name
+     *
+     * This implements the getRecords() method from DatabaseAccessor
+     *
+     * \exception SQLite3Error if there is an sqlite3 error when performing
+     *                         the query
+     *
+     * \param name the name to look up
+     * \param id the zone id, as returned by getZone()
+     * \param subdomains Match subdomains instead of the name.
+     * \return Iterator that contains all records with the given name
+     */
+    virtual IteratorContextPtr getRecords(const std::string& name,
+                                          int id,
+                                          bool subdomains = false) const;
+
+    /** \brief Look up all resource records for a zone
+     *
+     * This implements the getRecords() method from DatabaseAccessor
+     *
+     * \exception SQLite3Error if there is an sqlite3 error when performing
+     *                         the query
+     *
+     * \param id the zone id, as returned by getZone()
+     * \return Iterator that contains all records in the given zone
+     */
+    virtual IteratorContextPtr getAllRecords(int id) const;
+
+    /** \brief Creates an iterator context for a set of differences.
+     *
+     * Implements the getDiffs() method from DatabaseAccessor
+     *
+     * \exception NoSuchSerial if either of the versions do not exist in
+     *            the difference table.
+     * \exception SQLite3Error if there is an sqlite3 error when performing
+     *            the query
+     *
+     * \param id The ID of the zone, returned from getZone().
+     * \param start The SOA serial number of the version of the zone from
+     *        which the difference sequence should start.
+     * \param end The SOA serial number of the version of the zone at which
+     *        the difference sequence should end.
+     *
+     * \return Iterator containing difference records.
+     */
+    virtual IteratorContextPtr
+    getDiffs(int id, uint32_t start, uint32_t end) const;
+
+
+    /// \brief Start a transaction for updating a zone.
+    ///
+    /// Returns the same (found, zone id) pair as getZone(); with
+    /// replace=true, the zone's existing records are deleted inside the
+    /// started transaction.
+    virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
+                                                 bool replace);
+
+    virtual void startTransaction();
+
+    /// \note we are quite impatient here: it's quite possible that the COMMIT
+    /// fails due to other process performing SELECT on the same database
+    /// (consider the case where COMMIT is done by xfrin or dynamic update
+    /// server while an authoritative server is busy reading the DB).
+    /// In a future version we should probably need to introduce some retry
+    /// attempt and/or increase timeout before giving up the COMMIT, even
+    /// if it still doesn't guarantee 100% success.  Right now this
+    /// implementation throws a \c DataSourceError exception in such a case.
+    virtual void commit();
+
+    /// \note In SQLite3 rollback can fail if there's another unfinished
+    /// statement is performed for the same database structure.
+    /// Although it's not expected to happen in our expected usage, it's not
+    /// guaranteed to be prevented at the API level.  If it ever happens, this
+    /// method throws a \c DataSourceError exception.  It should be
+    /// considered a bug of the higher level application program.
+    virtual void rollback();
+
+    virtual void addRecordToZone(
+        const std::string (&columns)[ADD_COLUMN_COUNT]);
+
+    virtual void deleteRecordInZone(
+        const std::string (&params)[DEL_PARAM_COUNT]);
+
+    /// This derived version of the method prepares an SQLite3 statement
+    /// for adding the diff first time it's called, and if it fails throws
+    /// an \c SQLite3Error exception.
+    virtual void addRecordDiff(
+        int zone_id, uint32_t serial, DiffOperation operation,
+        const std::string (&params)[DIFF_PARAM_COUNT]);
+
+    // A short term method for tests until we implement more complete
+    // API to retrieve diffs (#1330).  It returns all records of the diffs
+    // table whose zone_id column is identical to the given value.
+    // Since this is a short term workaround, it ignores some corner cases
+    // (such as an SQLite3 execution failure) and is not very efficient,
+    // in favor of brevity.  Once #1330 is completed, this method must be
+    // removed, and the tests using this method must be rewritten using the
+    // official API.
+    std::vector<std::vector<std::string> > getRecordDiff(int zone_id);
+
+    /// The SQLite3 implementation of this method returns a string starting
+    /// with a fixed prefix of "sqlite3_" followed by the DB file name
+    /// removing any path name.  For example, for the DB file
+    /// /somewhere/in/the/system/bind10.sqlite3, this method will return
+    /// "sqlite3_bind10.sqlite3".
+    virtual const std::string& getDBName() const { return (database_name_); }
+
+    /// \brief Concrete implementation of the pure virtual method
+    virtual std::string findPreviousName(int zone_id, const std::string& rname)
+        const;
+
+private:
+    /// \brief Private database data
+    boost::scoped_ptr<SQLite3Parameters> dbparameters_;
+    /// \brief The filename of the DB (necessary for clone())
+    const std::string filename_;
+    /// \brief The class for which the queries are done
+    const std::string class_;
+    /// \brief Database name
+    const std::string database_name_;
+
+    /// \brief Opens the database
+    void open(const std::string& filename);
+    /// \brief Closes the database
+    void close();
+
+    /// \brief SQLite3 implementation of IteratorContext for all records
+    class Context;
+    friend class Context;
+    /// \brief SQLite3 implementation of IteratorContext for differences
+    class DiffContext;
+    friend class DiffContext;
+};
+
+/// \brief Creates an instance of the SQlite3 datasource client
+///
+/// Currently the configuration passed here must be a MapElement, containing
+/// one item called "database_file", whose value is a string
+///
+/// This configuration setup is currently under discussion and will change in
+/// the near future.
+///
+/// \param config The configuration for the datasource instance
+/// \param error This string will be set to an error message if an error occurs
+///              during initialization
+/// \return An instance of the sqlite3 datasource client, or NULL if there was
+///         an error
+extern "C" DataSourceClient* createInstance(isc::data::ConstElementPtr config,
+                                            std::string& error);
+
+/// \brief Destroy the instance created by createInstance()
+extern "C" void destroyInstance(DataSourceClient* instance);
+
+}
+}
+
+#endif  // __DATASRC_SQLITE3_ACCESSOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/datasrc/sqlite3_datasrc.cc b/src/lib/datasrc/sqlite3_datasrc.cc
index 18ee929..03b057c 100644
--- a/src/lib/datasrc/sqlite3_datasrc.cc
+++ b/src/lib/datasrc/sqlite3_datasrc.cc
@@ -26,6 +26,8 @@
 #include <dns/rrset.h>
 #include <dns/rrsetlist.h>
 
+#define SQLITE_SCHEMA_VERSION 1
+
 using namespace std;
 using namespace isc::dns;
 using namespace isc::dns::rdata;
@@ -77,6 +79,8 @@ const char* const SCHEMA_LIST[] = {
     NULL
 };
 
+const char* const q_version_str = "SELECT version FROM schema_version";
+
 const char* const q_zone_str = "SELECT id FROM zones WHERE name=?1";
 
 const char* const q_record_str = "SELECT rdtype, ttl, sigtype, rdata "
@@ -254,7 +258,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
         }
         break;
     }
-    
+
     sqlite3_reset(query);
     sqlite3_clear_bindings(query);
 
@@ -295,7 +299,7 @@ Sqlite3DataSrc::findRecords(const Name& name, const RRType& rdtype,
     //
     sqlite3_reset(dbparameters->q_count_);
     sqlite3_clear_bindings(dbparameters->q_count_);
-    
+
     rc = sqlite3_bind_int(dbparameters->q_count_, 1, zone_id);
     if (rc != SQLITE_OK) {
         isc_throw(Sqlite3Error, "Could not bind zone ID " << zone_id <<
@@ -653,29 +657,90 @@ prepare(sqlite3* const db, const char* const statement) {
     return (prepared);
 }
 
-void
-checkAndSetupSchema(Sqlite3Initializer* initializer) {
-    sqlite3* const db = initializer->params_.db_;
+// small function to sleep for 0.1 seconds, needed when waiting for
+// exclusive database locks (which should only occur on startup, and only
+// when the database has not been created yet)
+void do_sleep() {
+    struct timespec req;
+    req.tv_sec = 0;
+    req.tv_nsec = 100000000;
+    nanosleep(&req, NULL);
+}
 
+// returns the schema version if the schema version table exists
+// returns -1 if it does not
+int check_schema_version(sqlite3* db) {
     sqlite3_stmt* prepared = NULL;
-    if (sqlite3_prepare_v2(db, "SELECT version FROM schema_version", -1,
-                           &prepared, NULL) == SQLITE_OK &&
-        sqlite3_step(prepared) == SQLITE_ROW) {
-        initializer->params_.version_ = sqlite3_column_int(prepared, 0);
-        sqlite3_finalize(prepared);
-    } else {
-        logger.info(DATASRC_SQLITE_SETUP);
-        if (prepared != NULL) {
-            sqlite3_finalize(prepared);
+    // At this point in time, the database might be exclusively locked, in
+    // which case even prepare() will return BUSY, so we may need to try a
+    // few times
+    for (size_t i = 0; i < 50; ++i) {
+        int rc = sqlite3_prepare_v2(db, q_version_str, -1, &prepared, NULL);
+        if (rc == SQLITE_ERROR) {
+            // this is the error that is returned when the table does not
+            // exist
+            return (-1);
+        } else if (rc == SQLITE_OK) {
+            break;
+        } else if (rc != SQLITE_BUSY || i == 49) {
+            isc_throw(Sqlite3Error, "Unable to prepare version query: "
+                        << rc << " " << sqlite3_errmsg(db));
         }
+        do_sleep();
+    }
+    if (sqlite3_step(prepared) != SQLITE_ROW) {
+        isc_throw(Sqlite3Error,
+                    "Unable to query version: " << sqlite3_errmsg(db));
+    }
+    int version = sqlite3_column_int(prepared, 0);
+    sqlite3_finalize(prepared);
+    return (version);
+}
+
+// return db version
+int create_database(sqlite3* db) {
+    // try to get an exclusive lock. Once that is obtained, do the version
+    // check *again*, just in case this process was racing another
+    //
+    // try for 5 secs (50*0.1)
+    int rc;
+    logger.info(DATASRC_SQLITE_SETUP);
+    for (size_t i = 0; i < 50; ++i) {
+        rc = sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL,
+                            NULL);
+        if (rc == SQLITE_OK) {
+            break;
+        } else if (rc != SQLITE_BUSY || i == 49) {
+            isc_throw(Sqlite3Error, "Unable to acquire exclusive lock "
+                        "for database creation: " << sqlite3_errmsg(db));
+        }
+        do_sleep();
+    }
+    int schema_version = check_schema_version(db);
+    if (schema_version == -1) {
         for (int i = 0; SCHEMA_LIST[i] != NULL; ++i) {
             if (sqlite3_exec(db, SCHEMA_LIST[i], NULL, NULL, NULL) !=
                 SQLITE_OK) {
                 isc_throw(Sqlite3Error,
-                          "Failed to set up schema " << SCHEMA_LIST[i]);
+                        "Failed to set up schema " << SCHEMA_LIST[i]);
             }
         }
+        sqlite3_exec(db, "COMMIT TRANSACTION", NULL, NULL, NULL);
+        return (SQLITE_SCHEMA_VERSION);
+    } else {
+        sqlite3_exec(db, "ROLLBACK", NULL, NULL, NULL); return (schema_version);
+    }
+}
+
+void
+checkAndSetupSchema(Sqlite3Initializer* initializer) {
+    sqlite3* const db = initializer->params_.db_;
+
+    int schema_version = check_schema_version(db);
+    if (schema_version != SQLITE_SCHEMA_VERSION) {
+        schema_version = create_database(db);
     }
+    initializer->params_.version_ = schema_version;
 
     initializer->params_.q_zone_ = prepare(db, q_zone_str);
     initializer->params_.q_record_ = prepare(db, q_record_str);
diff --git a/src/lib/datasrc/static_datasrc.cc b/src/lib/datasrc/static_datasrc.cc
index 65229a0..fd43e1c 100644
--- a/src/lib/datasrc/static_datasrc.cc
+++ b/src/lib/datasrc/static_datasrc.cc
@@ -70,6 +70,7 @@ StaticDataSrcImpl::StaticDataSrcImpl() :
     authors = RRsetPtr(new RRset(authors_name, RRClass::CH(),
                                  RRType::TXT(), RRTTL(0)));
     authors->addRdata(generic::TXT("Chen Zhengzhang")); // Jerry
+    authors->addRdata(generic::TXT("Dmitriy Volodin"));
     authors->addRdata(generic::TXT("Evan Hunt"));
     authors->addRdata(generic::TXT("Haidong Wang")); // Ocean
     authors->addRdata(generic::TXT("Han Feng"));
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index fbcf9c9..6dd6b0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -1,8 +1,12 @@
+SUBDIRS = testdata
+
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_builddir)/src/lib/dns -I$(top_srcdir)/src/lib/dns
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CPPFLAGS += $(SQLITE_CFLAGS)
-AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_srcdir)/testdata\"
+AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_builddir)/testdata\"
+AM_CPPFLAGS += -DINSTALL_PROG=\"$(abs_top_srcdir)/install-sh\"
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
@@ -14,33 +18,72 @@ CLEANFILES = *.gcno *.gcda
 
 TESTS =
 if HAVE_GTEST
-TESTS += run_unittests
-run_unittests_SOURCES = run_unittests.cc
-run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
-run_unittests_SOURCES += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+TESTS += run_unittests run_unittests_sqlite3 run_unittests_memory
+
+#
+# For each specific datasource, there is a separate binary that includes
+# the code itself (we can't unittest through the public API). These need
+# to be separate because the included code, by design, contains conflicting
+# symbols.
+# We also have a 'general' run_unittests with non-datasource-specific tests
+#
+
+# First define the parts shared by all
+common_sources = run_unittests.cc
+common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.h
+common_sources += $(top_srcdir)/src/lib/dns/tests/unittest_util.cc
+
+common_ldadd  = $(GTEST_LDADD)
+common_ldadd += $(SQLITE_LIBS)
+common_ldadd += $(top_builddir)/src/lib/datasrc/libdatasrc.la
+common_ldadd += $(top_builddir)/src/lib/dns/libdns++.la
+common_ldadd += $(top_builddir)/src/lib/util/libutil.la
+common_ldadd += $(top_builddir)/src/lib/log/liblog.la
+common_ldadd += $(top_builddir)/src/lib/exceptions/libexceptions.la
+common_ldadd += $(top_builddir)/src/lib/cc/libcc.la
+common_ldadd += $(top_builddir)/src/lib/testutils/libtestutils.la
+common_ldadd += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+
+
+# The general tests
+run_unittests_SOURCES = $(common_sources)
 run_unittests_SOURCES += datasrc_unittest.cc
-run_unittests_SOURCES += sqlite3_unittest.cc
 run_unittests_SOURCES += static_unittest.cc
 run_unittests_SOURCES += query_unittest.cc
 run_unittests_SOURCES += cache_unittest.cc
 run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
 run_unittests_SOURCES += rbtree_unittest.cc
-run_unittests_SOURCES += zonetable_unittest.cc
-run_unittests_SOURCES += memory_datasrc_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
+run_unittests_SOURCES += client_unittest.cc
 
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
 
-run_unittests_LDADD  = $(GTEST_LDADD)
-run_unittests_LDADD += $(SQLITE_LIBS)
-run_unittests_LDADD += $(top_builddir)/src/lib/datasrc/libdatasrc.la
-run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
-run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
-run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
-run_unittests_LDADD += $(top_builddir)/src/lib/cc/libcc.la
-run_unittests_LDADD += $(top_builddir)/src/lib/testutils/libtestutils.la
-run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD = $(common_ldadd)
+
+
+# SQlite3 datasource tests
+run_unittests_sqlite3_SOURCES = $(common_sources)
+run_unittests_sqlite3_SOURCES += database_unittest.cc
+run_unittests_sqlite3_SOURCES += sqlite3_unittest.cc
+run_unittests_sqlite3_SOURCES += sqlite3_accessor_unittest.cc
+run_unittests_sqlite3_SOURCES += $(top_srcdir)/src/lib/datasrc/sqlite3_accessor.cc
+
+run_unittests_sqlite3_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_sqlite3_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+
+run_unittests_sqlite3_LDADD = $(common_ldadd)
+
+# In-memory datasource tests
+run_unittests_memory_SOURCES = $(common_sources)
+run_unittests_memory_SOURCES += memory_datasrc_unittest.cc
+run_unittests_memory_SOURCES += $(top_srcdir)/src/lib/datasrc/memory_datasrc.cc
+
+run_unittests_memory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_memory_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+
+run_unittests_memory_LDADD = $(common_ldadd)
+
 endif
 
 noinst_PROGRAMS = $(TESTS)
@@ -57,3 +100,24 @@ EXTRA_DIST += testdata/sql1.example.com.signed
 EXTRA_DIST += testdata/sql2.example.com.signed
 EXTRA_DIST += testdata/test-root.sqlite3
 EXTRA_DIST += testdata/test.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
+EXTRA_DIST += testdata/rwtest.sqlite3
+EXTRA_DIST += testdata/diffs.sqlite3
+
+# For the factory unit tests, we need to specify that we want
+# the loadable backend libraries from the build tree, and not from 
+# the installation directory. Therefore we build it into a separate
+# binary, and call that from check-local with B10_FROM_BUILD set.
+# Also, we only want to do this when static building is not used,
+# since it will cause various troubles with static link such as
+# "missing" symbols in the static object for the module.
+if !USE_STATIC_LINK
+noinst_PROGRAMS+=run_unittests_factory
+run_unittests_factory_SOURCES = $(common_sources)
+run_unittests_factory_SOURCES += factory_unittest.cc
+run_unittests_factory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_factory_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+run_unittests_factory_LDADD = $(common_ldadd)
+check-local:
+	B10_FROM_BUILD=${abs_top_builddir} ./run_unittests_factory
+endif
diff --git a/src/lib/datasrc/tests/cache_unittest.cc b/src/lib/datasrc/tests/cache_unittest.cc
index 96beae0..1325f64 100644
--- a/src/lib/datasrc/tests/cache_unittest.cc
+++ b/src/lib/datasrc/tests/cache_unittest.cc
@@ -202,15 +202,15 @@ TEST_F(CacheTest, retrieveFail) {
 }
 
 TEST_F(CacheTest, expire) {
-    // Insert "foo" with a duration of 2 seconds; sleep 3.  The
+    // Insert "foo" with a duration of 1 second; sleep 2.  The
     // record should not be returned from the cache even though it's
     // at the top of the cache.
     RRsetPtr aaaa(new RRset(Name("foo"), RRClass::IN(), RRType::AAAA(),
                             RRTTL(0)));
     aaaa->addRdata(in::AAAA("2001:db8:3:bb::5"));
-    cache.addPositive(aaaa, 0, 2);
+    cache.addPositive(aaaa, 0, 1);
 
-    sleep(3);
+    sleep(2);
 
     RRsetPtr r;
     uint32_t f;
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
new file mode 100644
index 0000000..64ad25f
--- /dev/null
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -0,0 +1,59 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <utility>
+
+#include <datasrc/client.h>
+
+#include <dns/name.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using isc::dns::Name;
+
+namespace {
+
+/*
+ * The DataSourceClient can't be created as it has pure virtual methods.
+ * So we implement them as NOPs and test the other methods.
+ */
+class NopClient : public DataSourceClient {
+public:
+    virtual FindResult findZone(const isc::dns::Name&) const {
+        return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
+    }
+    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool, bool)
+        const
+    {
+        return (ZoneUpdaterPtr());
+    }
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const {
+        isc_throw(isc::NotImplemented, "Journaling isn't supported "
+                  "in Nop data source");
+    }
+};
+
+class ClientTest : public ::testing::Test {
+public:
+    NopClient client_;
+};
+
+// The default implementation is NotImplemented
+TEST_F(ClientTest, defaultIterator) {
+    EXPECT_THROW(client_.getIterator(Name(".")), isc::NotImplemented);
+}
+
+}
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
new file mode 100644
index 0000000..920c9a2
--- /dev/null
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -0,0 +1,3260 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdlib.h>
+
+#include <boost/shared_ptr.hpp>
+#include <boost/lexical_cast.hpp>
+
+#include <gtest/gtest.h>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/rrttl.h>
+#include <dns/rrset.h>
+#include <exceptions/exceptions.h>
+
+#include <datasrc/database.h>
+#include <datasrc/zone.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <testutils/dnsmessage_test.h>
+
+#include <map>
+#include <vector>
+
+using namespace isc::datasrc;
+using namespace std;
+// don't import the entire boost namespace.  It will unexpectedly hide uint32_t
+// for some systems.
+using boost::shared_ptr;
+using boost::dynamic_pointer_cast;
+using boost::lexical_cast;
+using namespace isc::dns;
+
+namespace {
+
+// Imaginary zone IDs used in the mock accessor below.
+const int READONLY_ZONE_ID = 42;
+const int WRITABLE_ZONE_ID = 4200;
+
+// Commonly used test data
+const char* const TEST_RECORDS[][5] = {
+    // some plain data
+    {"www.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"www.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+    {"www.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+    {"www.example.org.", "NSEC", "3600", "", "www2.example.org. A AAAA NSEC RRSIG"},
+    {"www.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    {"www2.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"www2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+    {"www2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+    {"cname.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+    // some DNSSEC-'signed' data
+    {"signed1.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    {"signed1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+    {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+    {"signed1.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+    {"signed1.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    {"signedcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+    {"signedcname1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // special case might fail; sig is for cname, which isn't there (should be ignored)
+    // (ignoring of 'normal' other type is done above by www.)
+    {"acnamesig1.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"acnamesig1.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"acnamesig1.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // let's pretend we have a database that is not careful
+    // about the order in which it returns data
+    {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::2"},
+    {"signed2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE"},
+    {"signed2.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"signed2.example.org.", "RRSIG", "3600", "", "AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"signed2.example.org.", "AAAA", "3600", "", "2001:db8::1"},
+
+    {"signedcname2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"signedcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+    {"acnamesig2.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"acnamesig2.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"acnamesig2.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    {"acnamesig3.example.org.", "RRSIG", "3600", "", "CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"acnamesig3.example.org.", "RRSIG", "3600", "", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"acnamesig3.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    {"ttldiff1.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"ttldiff1.example.org.", "A", "360", "", "192.0.2.2"},
+
+    {"ttldiff2.example.org.", "A", "360", "", "192.0.2.1"},
+    {"ttldiff2.example.org.", "A", "3600", "", "192.0.2.2"},
+
+    // also add some intentionally bad data
+    {"badcname1.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"badcname1.example.org.", "CNAME", "3600", "", "www.example.org."},
+
+    {"badcname2.example.org.", "CNAME", "3600", "", "www.example.org."},
+    {"badcname2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    {"badcname3.example.org.", "CNAME", "3600", "", "www.example.org."},
+    {"badcname3.example.org.", "CNAME", "3600", "", "www.example2.org."},
+
+    {"badrdata.example.org.", "A", "3600", "", "bad"},
+
+    {"badtype.example.org.", "BAD_TYPE", "3600", "", "192.0.2.1"},
+
+    {"badttl.example.org.", "A", "badttl", "", "192.0.2.1"},
+
+    {"badsig.example.org.", "A", "badttl", "", "192.0.2.1"},
+    {"badsig.example.org.", "RRSIG", "3600", "", "A 5 3 3600 somebaddata 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    {"badsigtype.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"badsigtype.example.org.", "RRSIG", "3600", "TXT", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // Data for testing delegation (with NS and DNAME)
+    {"delegation.example.org.", "NS", "3600", "", "ns.example.com."},
+    {"delegation.example.org.", "NS", "3600", "",
+     "ns.delegation.example.org."},
+    {"delegation.example.org.", "DS", "3600", "", "1 RSAMD5 2 abcd"},
+    {"delegation.example.org.", "RRSIG", "3600", "", "NS 5 3 3600 "
+     "20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"ns.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"deep.below.delegation.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    {"dname.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"dname.example.org.", "DNAME", "3600", "", "dname.example.com."},
+    {"dname.example.org.", "RRSIG", "3600", "",
+     "DNAME 5 3 3600 20000101000000 20000201000000 12345 "
+     "example.org. FAKEFAKEFAKE"},
+
+    {"below.dname.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    // Broken NS
+    {"brokenns1.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"brokenns1.example.org.", "NS", "3600", "", "ns.example.com."},
+
+    {"brokenns2.example.org.", "NS", "3600", "", "ns.example.com."},
+    {"brokenns2.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    // Now double DNAME, to test failure mode
+    {"baddname.example.org.", "DNAME", "3600", "", "dname1.example.com."},
+    {"baddname.example.org.", "DNAME", "3600", "", "dname2.example.com."},
+
+    // Put some data into apex (including NS) so we can check our NS
+    // doesn't break anything
+    {"example.org.", "SOA", "3600", "", "ns1.example.org. admin.example.org. "
+     "1234 3600 1800 2419200 7200" },
+    {"example.org.", "NS", "3600", "", "ns.example.com."},
+    {"example.org.", "A", "3600", "", "192.0.2.1"},
+    {"example.org.", "NSEC", "3600", "", "acnamesig1.example.org. NS A NSEC RRSIG"},
+    {"example.org.", "RRSIG", "3600", "", "SOA 5 3 3600 20000101000000 "
+              "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 "
+              "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"example.org.", "RRSIG", "3600", "", "NS 5 3 3600 20000101000000 "
+              "20000201000000 12345 example.org. FAKEFAKEFAKE"},
+
+    // This is because of empty domain test
+    {"a.b.example.org.", "A", "3600", "", "192.0.2.1"},
+
+    // Something for wildcards
+    {"*.wild.example.org.", "A", "3600", "", "192.0.2.5"},
+    {"*.wild.example.org.", "RRSIG", "3600", "A", "A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"*.wild.example.org.", "NSEC", "3600", "", "cancel.here.wild.example.org. A NSEC RRSIG"},
+    {"*.wild.example.org.", "RRSIG", "3600", "", "NSEC 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE"},
+    {"cancel.here.wild.example.org.", "AAAA", "3600", "", "2001:db8::5"},
+    {"delegatedwild.example.org.", "NS", "3600", "", "ns.example.com."},
+    {"*.delegatedwild.example.org.", "A", "3600", "", "192.0.2.5"},
+    {"wild.*.foo.example.org.", "A", "3600", "", "192.0.2.5"},
+    {"wild.*.foo.*.bar.example.org.", "A", "3600", "", "192.0.2.5"},
+    {"wild.*.foo.*.bar.example.org.", "NSEC", "3600", "",
+     "brokenns1.example.org. A NSEC"},
+    {"bao.example.org.", "NSEC", "3600", "", "wild.*.foo.*.bar.example.org. NSEC"},
+    {"*.cnamewild.example.org.", "CNAME", "3600", "", "www.example.org."},
+    {"*.dnamewild.example.org.", "DNAME", "3600", "", "dname.example.com."},
+    {"*.nswild.example.org.", "NS", "3600", "", "ns.example.com."},
+    // For NSEC empty non-terminal
+    {"l.example.org.", "NSEC", "3600", "", "empty.nonterminal.example.org. NSEC"},
+    {"empty.nonterminal.example.org.", "A", "3600", "", "192.0.2.1"},
+    // Invalid rdata
+    {"invalidrdata.example.org.", "A", "3600", "", "Bunch of nonsense"},
+    {"invalidrdata2.example.org.", "A", "3600", "", "192.0.2.1"},
+    {"invalidrdata2.example.org.", "RRSIG", "3600", "", "Nonsense"},
+
+    {NULL, NULL, NULL, NULL, NULL},
+};
+
+/*
+ * An accessor with minimum implementation, keeping the original
+ * "NotImplemented" methods.
+ */
+class NopAccessor : public DatabaseAccessor {
+public:
+    NopAccessor() : database_name_("mock_database")
+    { }
+
+    virtual std::pair<bool, int> getZone(const std::string& name) const {
+        if (name == "example.org.") {
+            return (std::pair<bool, int>(true, READONLY_ZONE_ID));
+        } else if (name == "null.example.org.") {
+            return (std::pair<bool, int>(true, 13));
+        } else if (name == "empty.example.org.") {
+            return (std::pair<bool, int>(true, 0));
+        } else if (name == "bad.example.org.") {
+            return (std::pair<bool, int>(true, -1));
+        } else {
+            return (std::pair<bool, int>(false, 0));
+        }
+    }
+
+    virtual shared_ptr<DatabaseAccessor> clone() {
+        // This accessor is stateless, so we can simply return a new instance.
+        return (shared_ptr<DatabaseAccessor>(new NopAccessor));
+    }
+
+    virtual std::pair<bool, int> startUpdateZone(const std::string&, bool) {
+        // return dummy value.  unused anyway.
+        return (pair<bool, int>(true, 0));
+    }
+    virtual void startTransaction() {}
+    virtual void commit() {}
+    virtual void rollback() {}
+    virtual void addRecordToZone(const string (&)[ADD_COLUMN_COUNT]) {}
+    virtual void deleteRecordInZone(const string (&)[DEL_PARAM_COUNT]) {}
+    virtual void addRecordDiff(int, uint32_t, DiffOperation,
+                               const std::string (&)[DIFF_PARAM_COUNT]) {}
+
+    virtual const std::string& getDBName() const {
+        return (database_name_);
+    }
+
+    virtual IteratorContextPtr getRecords(const std::string&, int, bool)
+        const
+        {
+        isc_throw(isc::NotImplemented,
+                  "This database datasource can't be iterated");
+    }
+
+    virtual IteratorContextPtr getAllRecords(int) const {
+        isc_throw(isc::NotImplemented,
+                  "This database datasource can't be iterated");
+    }
+
+    virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
+        isc_throw(isc::NotImplemented,
+                  "This database datasource doesn't support diffs");
+    }
+
+    virtual std::string findPreviousName(int, const std::string&) const {
+        isc_throw(isc::NotImplemented,
+                  "This data source doesn't support DNSSEC");
+    }
+private:
+    const std::string database_name_;
+
+};
+
+/**
+ * Single journal entry in the mock database.
+ *
+ * All the members there are public for simplicity, as it only stores data.
+ * We use the implicit constructor and operator. The members can't be const
+ * because of the assignment operator (used in the vectors).
+ */
+struct JournalEntry {
+    JournalEntry(int id, uint32_t serial,
+                 DatabaseAccessor::DiffOperation operation,
+                 const std::string (&data)[DatabaseAccessor::DIFF_PARAM_COUNT])
+        : id_(id), serial_(serial), operation_(operation)
+    {
+        data_[DatabaseAccessor::DIFF_NAME] = data[DatabaseAccessor::DIFF_NAME];
+        data_[DatabaseAccessor::DIFF_TYPE] = data[DatabaseAccessor::DIFF_TYPE];
+        data_[DatabaseAccessor::DIFF_TTL] = data[DatabaseAccessor::DIFF_TTL];
+        data_[DatabaseAccessor::DIFF_RDATA] =
+            data[DatabaseAccessor::DIFF_RDATA];
+    }
+    JournalEntry(int id, uint32_t serial,
+                 DatabaseAccessor::DiffOperation operation,
+                 const std::string& name, const std::string& type,
+                 const std::string& ttl, const std::string& rdata):
+        id_(id), serial_(serial), operation_(operation)
+    {
+        data_[DatabaseAccessor::DIFF_NAME] = name;
+        data_[DatabaseAccessor::DIFF_TYPE] = type;
+        data_[DatabaseAccessor::DIFF_TTL] = ttl;
+        data_[DatabaseAccessor::DIFF_RDATA] = rdata;
+    }
+    int id_;
+    uint32_t serial_;
+    DatabaseAccessor::DiffOperation operation_;
+    std::string data_[DatabaseAccessor::DIFF_PARAM_COUNT];
+    bool operator==(const JournalEntry& other) const {
+        for (size_t i(0); i < DatabaseAccessor::DIFF_PARAM_COUNT; ++ i) {
+            if (data_[i] != other.data_[i]) {
+                return false;
+            }
+        }
+        // No need to check data here, checked above
+        return (id_ == other.id_ && serial_ == other.serial_ &&
+                operation_ == other.operation_);
+    }
+};
+
+/*
+ * A virtual database accessor that pretends it contains single zone --
+ * example.org.
+ *
+ * It has the same getZone method as NopAccessor, but it provides
+ * implementation of the optional functionality.
+ */
+// Mock implementation of DatabaseAccessor used by the tests in this file.
+// Zone data is held in in-memory maps ("Domains"), and a set of magic
+// names and zone IDs deliberately trigger exceptions or broken data so
+// that the robustness of DatabaseClient/Finder code can be exercised.
+class MockAccessor : public NopAccessor {
+    // Type of mock database "row"s.  This is a map whose keys are the
+    // own names.  We internally sort them by the name comparison order.
+    struct NameCompare : public binary_function<string, string, bool> {
+        bool operator()(const string& n1, const string& n2) const {
+            return (Name(n1).compare(Name(n2)).getOrder() < 0);
+        }
+    };
+    typedef std::map<std::string,
+                     std::vector< std::vector<std::string> >,
+                     NameCompare > Domains;
+
+public:
+    MockAccessor() : rollbacked_(false), did_transaction_(false) {
+        readonly_records_ = &readonly_records_master_;
+        update_records_ = &update_records_master_;
+        empty_records_ = &empty_records_master_;
+        journal_entries_ = &journal_entries_master_;
+        fillData();
+    }
+
+    // Create a clone that shares the "master" data of this accessor.  The
+    // clone is also remembered in latest_clone_ so test cases can inspect
+    // the accessor used internally for an update.
+    virtual shared_ptr<DatabaseAccessor> clone() {
+        shared_ptr<MockAccessor> cloned_accessor(new MockAccessor());
+        cloned_accessor->readonly_records_ = &readonly_records_master_;
+        cloned_accessor->update_records_ = &update_records_master_;
+        cloned_accessor->empty_records_ = &empty_records_master_;
+        cloned_accessor->journal_entries_ = &journal_entries_master_;
+        latest_clone_ = cloned_accessor;
+        return (cloned_accessor);
+    }
+
+    virtual void startTransaction() {
+        // Currently we only use this transaction for simple read-only
+        // operations.  So we just make a local copy of the data (we don't
+        // care about what happens after commit() or rollback()).
+        // Obviously as a consequence, if a test case tries to make multiple
+        // transactions on a single mock accessor it will fail.
+
+        // Check any attempt of multiple transactions
+        if (did_transaction_) {
+            isc_throw(isc::Unexpected, "MockAccessor::startTransaction() "
+                      "called multiple times - likely a bug in the test");
+        }
+
+        readonly_records_copy_ = *readonly_records_;
+        readonly_records_ = &readonly_records_copy_;
+        did_transaction_ = true;
+    }
+
+private:
+    // Iterator context returning the records of a single name (or, when
+    // 'subdomains' is true, of every name under it).  Certain hardcoded
+    // search names make the constructor or getNext() throw, to test error
+    // propagation from the accessor into find().
+    class MockNameIteratorContext : public IteratorContext {
+    public:
+        MockNameIteratorContext(const MockAccessor& mock_accessor, int zone_id,
+                                const std::string& name, bool subdomains) :
+            searched_name_(name), cur_record_(0)
+        {
+            // 'hardcoded' names to trigger exceptions
+            // On these names some exceptions are thrown, to test the robustness
+            // of the find() method.
+            if (searched_name_ == "dsexception.in.search.") {
+                isc_throw(DataSourceError, "datasource exception on search");
+            } else if (searched_name_ == "iscexception.in.search.") {
+                isc_throw(isc::Exception, "isc exception on search");
+            } else if (searched_name_ == "basicexception.in.search.") {
+                throw std::exception();
+            }
+
+            cur_record_ = 0;
+            const Domains& cur_records = mock_accessor.getMockRecords(zone_id);
+            if (cur_records.count(name) > 0) {
+                    // we're not aiming for efficiency in this test, simply
+                    // copy the relevant vector from records
+                    cur_name = cur_records.find(name)->second;
+            } else if (subdomains) {
+                cur_name.clear();
+                // Just walk everything and check if it is a subdomain.
+                // If it is, just copy all data from there.
+                for (Domains::const_iterator i(cur_records.begin());
+                     i != cur_records.end(); ++i) {
+                    const Name local(i->first);
+                    if (local.compare(Name(name)).getRelation() ==
+                        isc::dns::NameComparisonResult::SUBDOMAIN) {
+                        cur_name.insert(cur_name.end(), i->second.begin(),
+                                        i->second.end());
+                    }
+                }
+            } else {
+                cur_name.clear();
+            }
+        }
+
+        // Copy the next record into 'columns'; returns false when the
+        // collected records are exhausted.
+        virtual bool getNext(std::string (&columns)[COLUMN_COUNT]) {
+            if (searched_name_ == "dsexception.in.getnext.") {
+                isc_throw(DataSourceError, "datasource exception on getnextrecord");
+            } else if (searched_name_ == "iscexception.in.getnext.") {
+                isc_throw(isc::Exception, "isc exception on getnextrecord");
+            } else if (searched_name_ == "basicexception.in.getnext.") {
+                throw std::exception();
+            }
+
+            if (cur_record_ < cur_name.size()) {
+                for (size_t i = 0; i < COLUMN_COUNT; ++i) {
+                    columns[i] = cur_name[cur_record_][i];
+                }
+                cur_record_++;
+                return (true);
+            } else {
+                return (false);
+            }
+        }
+
+    private:
+        const std::string searched_name_;
+        int cur_record_;
+        std::vector< std::vector<std::string> > cur_name;
+    };
+
+    // Iterator context for getAllRecords() on the read-only zone: replays
+    // a fixed, hardcoded sequence of records, including multi-record
+    // RRsets and a TTL-mismatch case ("ttldiff.example.org").
+    class MockIteratorContext : public IteratorContext {
+    private:
+        int step;
+        const Domains& domains_;
+    public:
+        MockIteratorContext(const Domains& domains) :
+            step(0), domains_(domains)
+        { }
+        virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+            // A special case: if the given set of domains is already empty,
+            // we always return false.
+            if (domains_.empty()) {
+                return (false);
+            }
+
+            // Return faked data for tests
+            switch (step ++) {
+                case 0:
+                    data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "3600";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+                    return (true);
+                case 1:
+                    data[DatabaseAccessor::NAME_COLUMN] = "example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "SOA";
+                    data[DatabaseAccessor::TTL_COLUMN] = "3600";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "ns1.example.org. admin.example.org. "
+                        "1234 3600 1800 2419200 7200";
+                    return (true);
+                case 2:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+                    return (true);
+                case 3:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+                    return (true);
+                case 4:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::1";
+                    return (true);
+                case 5:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "AAAA";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "2001:db8::2";
+                    return (true);
+                case 6:
+                    data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+                    return (true);
+                case 7:
+                    data[DatabaseAccessor::NAME_COLUMN] = "ttldiff.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "600";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+                    return (true);
+                default:
+                    ADD_FAILURE() <<
+                        "Request past the end of iterator context";
+                case 8:
+                    return (false);
+            }
+        }
+    };
+    // Iterator context that yields no records at all.
+    class EmptyIteratorContext : public IteratorContext {
+    public:
+        virtual bool getNext(string(&)[COLUMN_COUNT]) {
+            return (false);
+        }
+    };
+    // Iterator context producing one RRset whose records have inconsistent
+    // TTLs (300 vs 301), to test iterator robustness against broken data.
+    class BadIteratorContext : public IteratorContext {
+    private:
+        int step;
+    public:
+        BadIteratorContext() :
+            step(0)
+        { }
+        virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+            switch (step ++) {
+                case 0:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "300";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.1";
+                    return (true);
+                case 1:
+                    data[DatabaseAccessor::NAME_COLUMN] = "x.example.org";
+                    data[DatabaseAccessor::TYPE_COLUMN] = "A";
+                    data[DatabaseAccessor::TTL_COLUMN] = "301";
+                    data[DatabaseAccessor::RDATA_COLUMN] = "192.0.2.2";
+                    return (true);
+                default:
+                    ADD_FAILURE() <<
+                        "Request past the end of iterator context";
+                case 2:
+                    return (false);
+            }
+        }
+    };
+    // Iterator context replaying a pre-selected list of journal (diff)
+    // entries, mapping the DIFF_xxx columns onto the generic record columns.
+    class MockDiffIteratorContext : public IteratorContext {
+        const vector<JournalEntry> diffs_;
+        vector<JournalEntry>::const_iterator it_;
+    public:
+        MockDiffIteratorContext(const vector<JournalEntry>& diffs) :
+            diffs_(diffs), it_(diffs_.begin())
+        {}
+        virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+            if (it_ == diffs_.end()) {
+                return (false);
+            }
+            data[DatabaseAccessor::NAME_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_NAME];
+            data[DatabaseAccessor::TYPE_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TYPE];
+            data[DatabaseAccessor::TTL_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TTL];
+            data[DatabaseAccessor::RDATA_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_RDATA];
+            ++it_;
+            return (true);
+        }
+    };
+public:
+    // Return an iterator over the whole zone.  Magic zone IDs select
+    // special contexts: 13 -> null pointer, 0 -> empty, -1 -> broken data;
+    // anything else but READONLY_ZONE_ID throws.
+    virtual IteratorContextPtr getAllRecords(int id) const {
+        if (id == READONLY_ZONE_ID) {
+            return (IteratorContextPtr(new MockIteratorContext(
+                                           *readonly_records_)));
+        } else if (id == 13) {
+            return (IteratorContextPtr());
+        } else if (id == 0) {
+            return (IteratorContextPtr(new EmptyIteratorContext()));
+        } else if (id == -1) {
+            return (IteratorContextPtr(new BadIteratorContext()));
+        } else {
+            isc_throw(isc::Unexpected, "Unknown zone ID");
+        }
+    }
+
+    // Return an iterator over the records of one name (optionally
+    // including all of its subdomains).
+    virtual IteratorContextPtr getRecords(const std::string& name, int id,
+                                          bool subdomains) const
+    {
+        if (id == READONLY_ZONE_ID || id == WRITABLE_ZONE_ID) {
+            return (IteratorContextPtr(
+                        new MockNameIteratorContext(*this, id, name,
+                                                    subdomains)));
+        } else {
+            // This iterator is bogus, but for the cases tested below that's
+            // sufficient.
+            return (IteratorContextPtr(
+                        new MockNameIteratorContext(*this, READONLY_ZONE_ID,
+                                                    name, subdomains)));
+        }
+    }
+
+    // Begin an update on the given zone.  Magic zone names return special
+    // IDs ("bad" -> -1, "null" -> 13) used by the journaling tests above.
+    virtual pair<bool, int> startUpdateZone(const std::string& zone_name,
+                                            bool replace)
+    {
+        const pair<bool, int> zone_info = getZone(zone_name);
+        if (!zone_info.first) {
+            return (pair<bool, int>(false, 0));
+        }
+
+        // Prepare the record set for update.  If replacing the existing one,
+        // we use an empty set; otherwise we use a writable copy of the
+        // original.
+        if (replace) {
+            update_records_->clear();
+        } else {
+            *update_records_ = *readonly_records_;
+        }
+
+        if (zone_name == "bad.example.org.") {
+            return (pair<bool, int>(true, -1));
+        } else if (zone_name == "null.example.org.") {
+            return (pair<bool, int>(true, 13));
+        } else {
+            return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+        }
+    }
+    // Commit: make the update workspace the new read-only data.
+    virtual void commit() {
+        *readonly_records_ = *update_records_;
+    }
+    virtual void rollback() {
+        // Special hook: if something with a name of "throw.example.org"
+        // has been added, trigger an imaginary unexpected event with an
+        // exception.
+        if (update_records_->count("throw.example.org.") > 0) {
+            isc_throw(DataSourceError, "unexpected failure in rollback");
+        }
+
+        rollbacked_ = true;
+    }
+    // Append one record row (type, TTL, sigtype, rdata, name) for the given
+    // name to the update workspace.
+    virtual void addRecordToZone(const string (&columns)[ADD_COLUMN_COUNT]) {
+        // Copy the current value to cur_name.  If it doesn't exist,
+        // operator[] will create a new one.
+        cur_name_ = (*update_records_)[columns[DatabaseAccessor::ADD_NAME]];
+
+        vector<string> record_columns;
+        record_columns.push_back(columns[DatabaseAccessor::ADD_TYPE]);
+        record_columns.push_back(columns[DatabaseAccessor::ADD_TTL]);
+        record_columns.push_back(columns[DatabaseAccessor::ADD_SIGTYPE]);
+        record_columns.push_back(columns[DatabaseAccessor::ADD_RDATA]);
+        record_columns.push_back(columns[DatabaseAccessor::ADD_NAME]);
+
+        // copy back the added entry
+        cur_name_.push_back(record_columns);
+        (*update_records_)[columns[DatabaseAccessor::ADD_NAME]] = cur_name_;
+
+        // remember this one so that test cases can check it.
+        copy(columns, columns + DatabaseAccessor::ADD_COLUMN_COUNT,
+             columns_lastadded_);
+    }
+
+    // Helper predicate class used in deleteRecordInZone().
+    // Matches rows whose type (index 0) and rdata (index 3) equal the
+    // requested values (the indexes follow the row layout built above).
+    struct deleteMatch {
+        deleteMatch(const string& type, const string& rdata) :
+            type_(type), rdata_(rdata)
+        {}
+        bool operator()(const vector<string>& row) const {
+            return (row[0] == type_ && row[3] == rdata_);
+        }
+        const string& type_;
+        const string& rdata_;
+    };
+
+    // Remove matching records for one name; if the name ends up with no
+    // records it is dropped from the map entirely.
+    virtual void deleteRecordInZone(const string (&params)[DEL_PARAM_COUNT]) {
+        vector<vector<string> >& records =
+            (*update_records_)[params[DatabaseAccessor::DEL_NAME]];
+        records.erase(remove_if(records.begin(), records.end(),
+                                deleteMatch(
+                                    params[DatabaseAccessor::DEL_TYPE],
+                                    params[DatabaseAccessor::DEL_RDATA])),
+                      records.end());
+        if (records.empty()) {
+            (*update_records_).erase(params[DatabaseAccessor::DEL_NAME]);
+        }
+    }
+
+    //
+    // Helper methods to keep track of some update related activities
+    //
+    bool isRollbacked() const {
+        return (rollbacked_);
+    }
+
+    const string* getLastAdded() const {
+        return (columns_lastadded_);
+    }
+
+    // This allows the test code to get the accessor used in an update context
+    shared_ptr<const MockAccessor> getLatestClone() const {
+        return (latest_clone_);
+    }
+
+    virtual std::string findPreviousName(int id, const std::string& rname)
+        const
+    {
+        if (id == -1) {
+            isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+        } else if (id == READONLY_ZONE_ID) {
+            // For some specific names we intentionally return broken or
+            // unexpected result.
+            if (rname == "org.example.badnsec2.") {
+                return ("badnsec1.example.org.");
+            } else if (rname == "org.example.brokenname.") {
+                return ("brokenname...example.org.");
+            } else if (rname == "org.example.notimplnsec." ||
+                       rname == "org.example.wild.here.") {
+                isc_throw(isc::NotImplemented, "Not implemented in this test");
+            }
+
+            // For the general case, we search for the first name N in the
+            // domains that meets N >= reverse(rname) using lower_bound.
+            // The "previous name" is the name of the previous entry of N.
+            // Note that Domains are internally sorted by the Name comparison
+            // order.  Due to the API requirement we are given a reversed
+            // name (rname), so we need to reverse it again to convert it
+            // to the original name.
+            Domains::const_iterator it(readonly_records_->lower_bound(
+                                           Name(rname).reverse().toText()));
+            if (it == readonly_records_->begin()) {
+                isc_throw(isc::Unexpected, "Unexpected name");
+            }
+            if (it == readonly_records_->end()) {
+                return ((*readonly_records_->rbegin()).first);
+            }
+            return ((*(--it)).first);
+        } else {
+            isc_throw(isc::Unexpected, "Unknown zone ID");
+        }
+    }
+    // Record one journal (diff) entry; magic IDs simulate a zone without
+    // journaling support (13) and a zone where the operation fails (-1).
+    virtual void addRecordDiff(int id, uint32_t serial,
+                               DiffOperation operation,
+                               const std::string (&data)[DIFF_PARAM_COUNT])
+    {
+        if (id == 13) { // The null zone doesn't support journaling
+            isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+        } else if (id == -1) { // Bad zone throws
+            isc_throw(DataSourceError, "Test error");
+        } else {
+            journal_entries_->push_back(JournalEntry(id, serial, operation,
+                                                     data));
+        }
+    }
+
+    // Return an iterator over the journal entries between the two serials
+    // (inclusive); throws NoSuchSerial when the range cannot be matched.
+    virtual IteratorContextPtr getDiffs(int id, uint32_t start,
+                                        uint32_t end) const
+    {
+        vector<JournalEntry> selected_jnl;
+
+        for (vector<JournalEntry>::const_iterator it =
+                 journal_entries_->begin();
+             it != journal_entries_->end(); ++it)
+        {
+            // For simplicity we assume this method is called for the
+            // "readonly" zone possibly after making updates on the "writable"
+            // copy and committing them.
+            if (id != READONLY_ZONE_ID) {
+                continue;
+            }
+
+            // Note: the following logic is not 100% accurate in terms of
+            // serial number arithmetic; we prefer brevity for testing.
+            // Skip until we see the starting serial.  Once we started
+            // recording this condition is ignored (to support wrap-around
+            // case).  Also, it ignores the RR type; it only checks the
+            // versions.
+            if ((*it).serial_ < start && selected_jnl.empty()) {
+                continue;
+            }
+            if ((*it).serial_ > end) { // gone over the end serial. we're done.
+                break;
+            }
+            selected_jnl.push_back(*it);
+        }
+
+        // Check if we've found the requested range.  If not, throw.
+        if (selected_jnl.empty() || selected_jnl.front().serial_ != start ||
+            selected_jnl.back().serial_ != end) {
+            isc_throw(NoSuchSerial, "requested diff range is not found");
+        }
+
+        return (IteratorContextPtr(new MockDiffIteratorContext(selected_jnl)));
+    }
+
+    // Check the journal is as expected and clear the journal
+    void checkJournal(const std::vector<JournalEntry> &expected) const {
+        std::vector<JournalEntry> journal;
+        // Clean the journal, but keep local copy to check
+        journal.swap(*journal_entries_);
+        ASSERT_EQ(expected.size(), journal.size());
+        for (size_t i(0); i < expected.size(); ++ i) {
+            EXPECT_TRUE(expected[i] == journal[i]);
+        }
+    }
+
+private:
+    // The following member variables are storage and/or update work space
+    // of the test zone.  The "master"s are the real objects that contain
+    // the data, and they are shared among all accessors cloned from
+    // an initially created one.  The "copy" data will be used for read-only
+    // transaction.  The pointer members allow the sharing.
+    // "readonly" is for normal lookups.  "update" is the workspace for
+    // updates.  When update starts it will be initialized either as an
+    // empty set (when replacing the entire zone) or as a copy of the
+    // "readonly" one.  "empty" is a sentinel to produce negative results.
+    Domains readonly_records_master_;
+    Domains readonly_records_copy_;
+    Domains* readonly_records_;
+    Domains update_records_master_;
+    Domains* update_records_;
+    const Domains empty_records_master_;
+    const Domains* empty_records_;
+
+    // The journal data
+    std::vector<JournalEntry> journal_entries_master_;
+    std::vector<JournalEntry>* journal_entries_;
+
+    // used as temporary storage after searchForRecord() and during
+    // getNextRecord() calls, as well as during the building of the
+    // fake data
+    std::vector< std::vector<std::string> > cur_name_;
+
+    // The columns that were most recently added via addRecordToZone()
+    string columns_lastadded_[ADD_COLUMN_COUNT];
+
+    // Whether rollback operation has been performed for the database.
+    // Not useful except for purely testing purpose.
+    bool rollbacked_;
+
+    // Remember the mock accessor that was last cloned
+    boost::shared_ptr<MockAccessor> latest_clone_;
+
+    // Internal flag for duplicate check
+    bool did_transaction_;
+
+    // Select the record map for a zone ID; unknown IDs fall back to the
+    // (always empty) sentinel map, producing negative lookup results.
+    const Domains& getMockRecords(int zone_id) const {
+        if (zone_id == READONLY_ZONE_ID) {
+            return (*readonly_records_);
+        } else if (zone_id == WRITABLE_ZONE_ID) {
+            return (*update_records_);
+        }
+        return (*empty_records_);
+    }
+
+    // Adds one record to the current name in the database
+    // The actual data will not be added to 'records' until
+    // addCurName() is called
+    void addRecord(const std::string& type,
+                   const std::string& ttl,
+                   const std::string& sigtype,
+                   const std::string& rdata) {
+        std::vector<std::string> columns;
+        columns.push_back(type);
+        columns.push_back(ttl);
+        columns.push_back(sigtype);
+        columns.push_back(rdata);
+        cur_name_.push_back(columns);
+    }
+
+    // Adds all records we just built with calls to addRecords
+    // to the actual fake database. This will clear cur_name_,
+    // so we can immediately start adding new records.
+    void addCurName(const std::string& name) {
+        ASSERT_EQ(0, readonly_records_->count(name));
+        // Append the name to all of them
+        for (std::vector<std::vector<std::string> >::iterator
+             i(cur_name_.begin()); i != cur_name_.end(); ++ i) {
+            i->push_back(name);
+        }
+        (*readonly_records_)[name] = cur_name_;
+        cur_name_.clear();
+    }
+
+    // Fills the database with zone data.
+    // This method constructs a number of resource records (with addRecord),
+    // which will all be added for one domain name to the fake database
+    // (with addCurName). So for instance the first set of calls create
+    // data for the name 'www.example.org', which will consist of one A RRset
+    // of one record, and one AAAA RRset of two records.
+    // The order in which they are added is the order in which getNextRecord()
+    // will return them (so we can test whether find() etc. support data that
+    // might not come in 'normal' order)
+    // It shall immediately fail if you try to add the same name twice.
+    void fillData() {
+        const char* prev_name = NULL;
+        for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+            if (prev_name != NULL &&
+                strcmp(prev_name, TEST_RECORDS[i][0]) != 0) {
+                addCurName(prev_name);
+            }
+            prev_name = TEST_RECORDS[i][0];
+            addRecord(TEST_RECORDS[i][1], TEST_RECORDS[i][2],
+                      TEST_RECORDS[i][3], TEST_RECORDS[i][4]);
+        }
+        addCurName(prev_name);
+    }
+};
+
+// This tests the default getRecords behaviour, throwing NotImplemented
+// (i.e. a DatabaseAccessor subclass that does not override getRecords()
+// must reject calls rather than silently return nothing).
+TEST(DatabaseConnectionTest, getRecords) {
+    EXPECT_THROW(NopAccessor().getRecords(".", 1, false),
+                 isc::NotImplemented);
+}
+
+// This tests the default getAllRecords behaviour, throwing NotImplemented
+TEST(DatabaseConnectionTest, getAllRecords) {
+    // The parameters don't matter
+    EXPECT_THROW(NopAccessor().getAllRecords(1),
+                 isc::NotImplemented);
+}
+
+// This test fixture is templated so that we can share (most of) the test
+// cases with different types of data sources.  Note that in test cases
+// we need to use 'this' to refer to member variables of the test class.
+template <typename ACCESSOR_TYPE>
+class DatabaseClientTest : public ::testing::Test {
+public:
+    // Set up a fresh client, the finder for the common test zone, and a
+    // few RRsets (A, SOA, RRSIG) used by the update test cases.
+    DatabaseClientTest() : zname_("example.org"), qname_("www.example.org"),
+                           qclass_(RRClass::IN()), qtype_(RRType::A()),
+                           rrttl_(3600)
+    {
+        createClient();
+
+        // set up the commonly used finder.
+        DataSourceClient::FindResult zone(client_->findZone(zname_));
+        assert(zone.code == result::SUCCESS);
+        finder_ = dynamic_pointer_cast<DatabaseClient::Finder>(
+            zone.zone_finder);
+
+        // Test IN/A RDATA to be added in update tests.  Intentionally using
+        // different data than the initial data configured in the MockAccessor.
+        rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
+        rrset_->addRdata(rdata::createRdata(rrset_->getType(),
+                                            rrset_->getClass(), "192.0.2.2"));
+        soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+        soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                         "ns1.example.org. admin.example.org. "
+                                         "1234 3600 1800 2419200 7200"));
+
+        // And its RRSIG.  Also different from the configured one.
+        rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
+                                  rrttl_));
+        rrsigset_->addRdata(rdata::createRdata(rrsigset_->getType(),
+                                               rrsigset_->getClass(),
+                                               "A 5 3 0 20000101000000 "
+                                               "20000201000000 0 example.org. "
+                                               "FAKEFAKEFAKE"));
+    }
+
+    /*
+     * We initialize the client from a function, so we can call it multiple
+     * times per test.
+     */
+    void createClient() {
+        // To make sure we always have empty diffs table at the beginning of
+        // each test, we re-install the writable data source here.
+        // Note: this is SQLite3 specific and a waste (though otherwise
+        // harmless) for other types of data sources.  If and when we support
+        // more types of data sources in this test framework, we should
+        // probably move this to some specialized templated method specific
+        // to SQLite3 (or for even a longer term we should add an API to
+        // purge the diffs table).
+        const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+            "/rwtest.sqlite3 " TEST_DATA_BUILDDIR
+            "/rwtest.sqlite3.copied";
+        if (system(install_cmd) != 0) {
+            // any exception will do, this is failure in test setup, but nice
+            // to show the command that fails, and shouldn't be caught
+            isc_throw(isc::Exception,
+                      "Error setting up; command failed: " << install_cmd);
+        }
+
+        // The client takes ownership of the accessor via shared_ptr; the
+        // raw pointer is only kept for identity comparisons in tests.
+        current_accessor_ = new ACCESSOR_TYPE();
+        is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
+        client_.reset(new DatabaseClient(qclass_,
+                                         shared_ptr<ACCESSOR_TYPE>(
+                                             current_accessor_)));
+    }
+
+    /**
+     * Check the zone finder is a valid one and references the zone ID and
+     * database available here.
+     */
+    void checkZoneFinder(const DataSourceClient::FindResult& zone) {
+        ASSERT_NE(ZoneFinderPtr(), zone.zone_finder) << "No zone finder";
+        shared_ptr<DatabaseClient::Finder> finder(
+            dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+        ASSERT_NE(shared_ptr<DatabaseClient::Finder>(), finder) <<
+            "Wrong type of finder";
+        if (is_mock_) {
+            EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+        }
+        EXPECT_EQ(current_accessor_, &finder->getAccessor());
+    }
+
+    // Shortcut: look up the test zone and return its finder, checking the
+    // finder type (and, for the mock accessor, the zone ID) on the way.
+    shared_ptr<DatabaseClient::Finder> getFinder() {
+        DataSourceClient::FindResult zone(client_->findZone(zname_));
+        EXPECT_EQ(result::SUCCESS, zone.code);
+        shared_ptr<DatabaseClient::Finder> finder(
+            dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder));
+        if (is_mock_) {
+            EXPECT_EQ(READONLY_ZONE_ID, finder->zone_id());
+        }
+
+        return (finder);
+    }
+
+    // Helper methods for update tests
+    // For the mock accessor report whether rollback() was really called;
+    // for real databases we can't observe that, so return the caller's
+    // expectation instead.
+    bool isRollbacked(bool expected = false) const {
+        if (is_mock_) {
+            const MockAccessor& mock_accessor =
+                dynamic_cast<const MockAccessor&>(*update_accessor_);
+            return (mock_accessor.isRollbacked());
+        } else {
+            return (expected);
+        }
+    }
+
+    // Compare the columns most recently passed to addRecordToZone() (via
+    // the latest clone of the mock accessor) against 'expected'.  No-op
+    // for non-mock accessors.
+    void checkLastAdded(const char* const expected[]) const {
+        if (is_mock_) {
+            const MockAccessor* mock_accessor =
+                dynamic_cast<const MockAccessor*>(current_accessor_);
+            for (int i = 0; i < DatabaseAccessor::ADD_COLUMN_COUNT; ++i) {
+                EXPECT_EQ(expected[i],
+                          mock_accessor->getLatestClone()->getLastAdded()[i]);
+            }
+        }
+    }
+
+    // Remember the accessor clone used by the current update so test code
+    // (e.g. isRollbacked()) can inspect it later.  Mock-only.
+    void setUpdateAccessor() {
+        if (is_mock_) {
+            const MockAccessor* mock_accessor =
+                dynamic_cast<const MockAccessor*>(current_accessor_);
+            update_accessor_ = mock_accessor->getLatestClone();
+        }
+    }
+
+    // Verify the recorded journal (diff) entries match 'expected'.  The
+    // mock accessor is checked (and cleared) directly; generic databases
+    // are checked through the journal reader API.
+    void checkJournal(const vector<JournalEntry>& expected) {
+        if (is_mock_) {
+            const MockAccessor* mock_accessor =
+                dynamic_cast<const MockAccessor*>(current_accessor_);
+            mock_accessor->checkJournal(expected);
+        } else {
+            // For other generic databases, retrieve the diff using the
+            // reader class and compare the resulting sequence of RRset.
+            // For simplicity we only consider the case where the expected
+            // sequence is not empty.
+            ASSERT_FALSE(expected.empty());
+            const Name zone_name(expected.front().
+                                 data_[DatabaseAccessor::DIFF_NAME]);
+            ZoneJournalReaderPtr jnl_reader =
+                client_->getJournalReader(zone_name,
+                                          expected.front().serial_,
+                                          expected.back().serial_).second;
+            ASSERT_TRUE(jnl_reader);
+            ConstRRsetPtr rrset;
+            vector<JournalEntry>::const_iterator it = expected.begin();
+            for (rrset = jnl_reader->getNextDiff();
+                 rrset && it != expected.end();
+                 rrset = jnl_reader->getNextDiff(), ++it) {
+                typedef DatabaseAccessor Accessor;
+                RRsetPtr expected_rrset(
+                    new RRset(Name((*it).data_[Accessor::DIFF_NAME]),
+                              qclass_,
+                              RRType((*it).data_[Accessor::DIFF_TYPE]),
+                              RRTTL((*it).data_[Accessor::DIFF_TTL])));
+                expected_rrset->addRdata(
+                    rdata::createRdata(expected_rrset->getType(),
+                                       expected_rrset->getClass(),
+                                       (*it).data_[Accessor::DIFF_RDATA]));
+                isc::testutils::rrsetCheck(expected_rrset, rrset);
+            }
+            // We should have examined all entries of both expected and
+            // actual data.
+            EXPECT_TRUE(it == expected.end());
+            ASSERT_FALSE(rrset);
+        }
+    }
+
+    // Some tests only work for MockAccessor.  We remember whether our accessor
+    // is of that type.
+    bool is_mock_;
+
+    // Will be deleted by client_, just keep the current value for comparison.
+    ACCESSOR_TYPE* current_accessor_;
+    shared_ptr<DatabaseClient> client_;
+    const std::string database_name_;
+
+    // The zone finder of the test zone commonly used in various tests.
+    shared_ptr<DatabaseClient::Finder> finder_;
+
+    // Some shortcut variables for commonly used test parameters
+    const Name zname_; // the zone name stored in the test data source
+    const Name qname_; // commonly used name to be found
+    const RRClass qclass_;      // commonly used RR class used with qname
+    const RRType qtype_;        // commonly used RR type used with qname
+    const RRTTL rrttl_;         // commonly used RR TTL
+    RRsetPtr rrset_;            // for adding/deleting an RRset
+    RRsetPtr rrsigset_;         // for adding/deleting an RRset
+    RRsetPtr soa_;              // for adding/deleting an RRset
+
+    // update related objects to be tested
+    ZoneUpdaterPtr updater_;
+    shared_ptr<const DatabaseAccessor> update_accessor_;
+
+    // placeholders
+    const std::vector<std::string> empty_rdatas_; // for NXRRSET/NXDOMAIN
+    std::vector<std::string> expected_rdatas_;
+    std::vector<std::string> expected_sig_rdatas_;
+};
+
+// An SQLite3Accessor specialization for tests.  On construction it opens
+// the writable copy of the test database and loads the shared TEST_RECORDS
+// data into the example.org zone, so every test starts from a known state.
+class TestSQLite3Accessor : public SQLite3Accessor {
+public:
+    TestSQLite3Accessor() : SQLite3Accessor(
+        TEST_DATA_BUILDDIR "/rwtest.sqlite3.copied", "IN")
+    {
+        // Second argument 'true' starts a replacing update (existing zone
+        // content is discarded).
+        startUpdateZone("example.org.", true);
+        string columns[ADD_COLUMN_COUNT];
+        for (int i = 0; TEST_RECORDS[i][0] != NULL; ++i) {
+            columns[ADD_NAME] = TEST_RECORDS[i][0];
+            // The accessor's schema also expects the reversed form of the
+            // owner name.
+            columns[ADD_REV_NAME] = Name(columns[ADD_NAME]).reverse().toText();
+            columns[ADD_TYPE] = TEST_RECORDS[i][1];
+            columns[ADD_TTL] = TEST_RECORDS[i][2];
+            columns[ADD_SIGTYPE] = TEST_RECORDS[i][3];
+            columns[ADD_RDATA] = TEST_RECORDS[i][4];
+
+            addRecordToZone(columns);
+        }
+        // Make the inserted records visible to subsequent transactions.
+        commit();
+    }
+};
+
+// The following two lines instantiate test cases with concrete accessor
+// classes to be tested.
+// XXX: clang++ installed on our FreeBSD buildbot cannot complete compiling
+// this file, seemingly due to the size of the code.  We'll consider more
+// complete workaround, but for a short term workaround we'll reduce the
+// number of tested accessor classes (thus reducing the amount of code
+// to be compiled) for this particular environment.
+#if defined(__clang__) && defined(__FreeBSD__)
+typedef ::testing::Types<MockAccessor> TestAccessorTypes;
+#else
+typedef ::testing::Types<MockAccessor, TestSQLite3Accessor> TestAccessorTypes;
+#endif
+
+// Register the typed test suite: each TYPED_TEST below runs once per
+// accessor type listed above.
+TYPED_TEST_CASE(DatabaseClientTest, TestAccessorTypes);
+
+// In some cases the entire test fixture is for the mock accessor only.
+// We use the usual TEST_F for them with the corresponding specialized class
+// to make the code simpler.
+typedef DatabaseClientTest<MockAccessor> MockDatabaseClientTest;
+
+// Looking up a zone the data source doesn't serve must yield NOTFOUND.
+TYPED_TEST(DatabaseClientTest, zoneNotFound) {
+    const DataSourceClient::FindResult found_zone =
+        this->client_->findZone(Name("example.com"));
+    EXPECT_EQ(result::NOTFOUND, found_zone.code);
+}
+
+// An exact match of a served zone yields SUCCESS and a usable finder.
+TYPED_TEST(DatabaseClientTest, exactZone) {
+    const DataSourceClient::FindResult found_zone =
+        this->client_->findZone(Name("example.org"));
+    EXPECT_EQ(result::SUCCESS, found_zone.code);
+    this->checkZoneFinder(found_zone);
+}
+
+// A name below a served zone matches that zone as PARTIALMATCH.
+TYPED_TEST(DatabaseClientTest, superZone) {
+    const DataSourceClient::FindResult found_zone =
+        this->client_->findZone(Name("sub.example.org"));
+    EXPECT_EQ(result::PARTIALMATCH, found_zone.code);
+    this->checkZoneFinder(found_zone);
+}
+
+// This test doesn't depend on derived accessor class, so we use TEST().
+TEST(GenericDatabaseClientTest, noAccessorException) {
+    // Constructing a client with a NULL accessor must be rejected.
+    // We keep a named 'dummy' variable; some compilers would otherwise
+    // parse the statement as a mere declaration and make the test fail.
+    shared_ptr<DatabaseAccessor> null_accessor;
+    EXPECT_THROW(DatabaseClient dummy(RRClass::IN(), null_accessor),
+                 isc::InvalidParameter);
+}
+
+// Requesting an iterator for a zone that doesn't exist throws.
+TYPED_TEST(DatabaseClientTest, noZoneIterator) {
+    const Name missing_zone("example.com");
+    EXPECT_THROW(this->client_->getIterator(missing_zone), DataSourceError);
+}
+
+// Even if the accessor doesn't implement iteration, asking to iterate a
+// nonexistent zone still reports the missing zone (DataSourceError).
+TEST(GenericDatabaseClientTest, noZoneNotImplementedIterator) {
+    boost::shared_ptr<DatabaseAccessor> nop_accessor(new NopAccessor());
+    DatabaseClient client(RRClass::IN(), nop_accessor);
+    EXPECT_THROW(client.getIterator(Name("example.com")), DataSourceError);
+}
+
+// Iterating an existing zone with an accessor that doesn't support
+// iteration raises NotImplemented.
+TEST(GenericDatabaseClientTest, notImplementedIterator) {
+    shared_ptr<DatabaseAccessor> nop_accessor(new NopAccessor());
+    DatabaseClient client(RRClass::IN(), nop_accessor);
+    EXPECT_THROW(client.getIterator(Name("example.org")),
+                 isc::NotImplemented);
+}
+
+// Pretend a bug in the connection and pass NULL as the context.
+// Should not crash, but gracefully throw.  Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, nullIteratorContext) {
+    const Name buggy_zone("null.example.org");
+    EXPECT_THROW(this->client_->getIterator(buggy_zone), isc::Unexpected);
+}
+
+// A completely empty zone can be iterated without crashing; the first
+// getNextRRset() already signals the end.  Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, emptyIterator) {
+    ZoneIteratorPtr iter =
+        this->client_->getIterator(Name("empty.example.org"));
+    EXPECT_EQ(ConstRRsetPtr(), iter->getNextRRset());
+    // Reading past the end must throw rather than return garbage.
+    EXPECT_THROW(iter->getNextRRset(), isc::Unexpected);
+}
+
+// Check that the given RRset carries exactly the given name, class, type,
+// TTL and set of RDATAs, via the shared rrsetCheck() helper.
+void
+checkRRset(isc::dns::ConstRRsetPtr rrset,
+           const isc::dns::Name& name,
+           const isc::dns::RRClass& rrclass,
+           const isc::dns::RRType& rrtype,
+           const isc::dns::RRTTL& rrttl,
+           const std::vector<std::string>& rdatas) {
+    // Build the expectation as a fresh RRset and let rrsetCheck compare.
+    isc::dns::RRsetPtr expected(
+        new isc::dns::RRset(name, rrclass, rrtype, rrttl));
+    std::vector<std::string>::const_iterator rd;
+    for (rd = rdatas.begin(); rd != rdatas.end(); ++rd) {
+        expected->addRdata(
+            isc::dns::rdata::createRdata(rrtype, rrclass, *rd));
+    }
+    isc::testutils::rrsetCheck(expected, rrset);
+}
+
+// Iterate through a zone, checking the order and content of the returned
+// RRsets.  Each getNextRRset() result is guarded with ASSERT_NE before it
+// is passed to checkRRset(), so a premature end of iteration produces a
+// clean test failure instead of a NULL dereference.
+TYPED_TEST(DatabaseClientTest, iterator) {
+    ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+    ConstRRsetPtr rrset(it->getNextRRset());
+    ASSERT_NE(ConstRRsetPtr(), rrset);
+
+    // The first name should be the zone origin.
+    EXPECT_EQ(this->zname_, rrset->getName());
+
+    // The rest of the checks work only for the mock accessor.
+    if (!this->is_mock_) {
+        return;
+    }
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    checkRRset(rrset, Name("example.org"), this->qclass_, RRType::A(),
+               this->rrttl_, this->expected_rdatas_);
+
+    rrset = it->getNextRRset();
+    ASSERT_NE(ConstRRsetPtr(), rrset); // guard against premature end
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("ns1.example.org. admin.example.org. "
+                                     "1234 3600 1800 2419200 7200");
+    checkRRset(rrset, Name("example.org"), this->qclass_, RRType::SOA(),
+               this->rrttl_, this->expected_rdatas_);
+
+    rrset = it->getNextRRset();
+    ASSERT_NE(ConstRRsetPtr(), rrset); // guard against premature end
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::A(),
+               RRTTL(300), this->expected_rdatas_);
+
+    rrset = it->getNextRRset();
+    ASSERT_NE(ConstRRsetPtr(), rrset); // guard against premature end
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::1");
+    this->expected_rdatas_.push_back("2001:db8::2");
+    checkRRset(rrset, Name("x.example.org"), this->qclass_, RRType::AAAA(),
+               RRTTL(300), this->expected_rdatas_);
+
+    rrset = it->getNextRRset();
+    ASSERT_NE(ConstRRsetPtr(), rrset);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    checkRRset(rrset, Name("ttldiff.example.org"), this->qclass_, RRType::A(),
+               RRTTL(300), this->expected_rdatas_);
+
+    // After the last RRset the iterator must report the end.
+    EXPECT_EQ(ConstRRsetPtr(), it->getNextRRset());
+}
+
+// This has inconsistent TTL in the set (the rest, like nonsense in
+// the data is handled in rdata itself).  Works for the mock accessor only.
+TEST_F(MockDatabaseClientTest, badIterator) {
+    // It should not throw, but get the lowest one of them.
+    ZoneIteratorPtr it(this->client_->getIterator(Name("bad.example.org")));
+    ConstRRsetPtr rrset(it->getNextRRset());
+    // Guard against dereferencing NULL if the iterator unexpectedly ends.
+    ASSERT_TRUE(rrset);
+    // (expected, actual) order, consistent with the rest of the file.
+    EXPECT_EQ(isc::dns::RRTTL(300), rrset->getTTL());
+}
+
+// getSOA() on a zone iterator should return the zone's SOA, and it should
+// match the SOA encountered during normal iteration.
+TYPED_TEST(DatabaseClientTest, getSOAFromIterator) {
+    vector<string> soa_data;
+    soa_data.push_back("ns1.example.org. admin.example.org. "
+                       "1234 3600 1800 2419200 7200");
+
+    // getSOA() should work before any iteration has taken place.
+    ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+    ASSERT_TRUE(it);
+    checkRRset(it->getSOA(), this->zname_, this->qclass_, RRType::SOA(),
+               this->rrttl_, soa_data);
+
+    // Iterate over the zone until we find an SOA.  Although there's a broken
+    // RDATA that would trigger an exception in getNextRRset(), we should
+    // reach the SOA as the sequence should be sorted and the SOA is at
+    // the origin name (which has no bogus data).
+    ConstRRsetPtr rrset;
+    while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+           rrset->getType() != RRType::SOA()) {
+        ;
+    }
+    ASSERT_TRUE(rrset);
+    // It should be identical to the result of getSOA().
+    isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+// getSOA() on an iterator over an empty zone must return NULL.
+TYPED_TEST(DatabaseClientTest, noSOAFromIterator) {
+    // Empty the zone by committing a replacing update with no content.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->commit();
+
+    // Now there is no SOA for getSOA() to find.
+    ZoneIteratorPtr iter = this->client_->getIterator(this->zname_);
+    ASSERT_TRUE(iter);
+    EXPECT_FALSE(iter->getSOA());
+}
+
+// Starting an update while an iterator is open: depending on the backend
+// this may throw, but either way the iterator's view must stay intact.
+TYPED_TEST(DatabaseClientTest, iterateThenUpdate) {
+    ZoneIteratorPtr it(this->client_->getIterator(this->zname_));
+    ASSERT_TRUE(it);
+
+    // Try to empty the zone after getting the iterator.  Depending on the
+    // underlying data source, it may result in an exception due to the
+    // transaction for the iterator.  In either case the integrity of the
+    // iterator result should be preserved.
+    try {
+        this->updater_ = this->client_->getUpdater(this->zname_, true);
+        this->updater_->commit();
+
+        // Confirm at least it doesn't contain any SOA
+        EXPECT_EQ(ZoneFinder::NXDOMAIN,
+                  this->getFinder()->find(this->zname_, RRType::SOA()).code);
+    } catch (const DataSourceError&) {}
+    // (Swallowing DataSourceError above is deliberate: a backend may
+    // legitimately refuse a concurrent update.)
+
+    ConstRRsetPtr rrset;
+    // The iterator should still deliver the original zone content,
+    // including its SOA.
+    while ((rrset = it->getNextRRset()) != ConstRRsetPtr() &&
+           rrset->getType() != RRType::SOA()) {
+        ;
+    }
+    ASSERT_TRUE(rrset);
+    // It should be identical to the result of getSOA().
+    isc::testutils::rrsetCheck(it->getSOA(), rrset);
+}
+
+// Exhausting an iterator should close its transaction so that a
+// subsequent update can start.
+TYPED_TEST(DatabaseClientTest, updateThenIterateThenUpdate) {
+    // Wipe the zone with a replacing update.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->commit();
+
+    // Iterating the now-empty zone reaches the end immediately, at which
+    // point the iterator's transaction should be committed.
+    ZoneIteratorPtr iter = this->client_->getIterator(this->zname_);
+    ASSERT_TRUE(iter);
+    EXPECT_FALSE(iter->getNextRRset());
+
+    // With that transaction closed, another update must succeed, too.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->commit();
+}
+
+// Destroying an iterator mid-zone should cancel its transaction
+// (effectively no different from committing it), unblocking updates.
+TYPED_TEST(DatabaseClientTest, updateAfterDeleteIterator) {
+    ZoneIteratorPtr iter = this->client_->getIterator(this->zname_);
+    ASSERT_TRUE(iter);
+    EXPECT_TRUE(iter->getNextRRset());
+    // Drop the iterator in the middle of the zone.
+    iter.reset();
+
+    // The data source must now accept a new update transaction.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->commit();
+}
+
+// Run finder.find(name, type) and check the result against expectations:
+// - expected_result: the find() result code that must be returned
+// - expected_type/expected_ttl: type and TTL of the expected answer RRset
+//   (expected_type may differ from the query type, e.g. CNAME answers)
+// - expected_rdatas: RDATAs of the expected answer; an empty vector means
+//   no RRset is expected in the result at all
+// - expected_sig_rdatas: RDATAs of the expected covering RRSIG; empty
+//   means the answer must carry no RRSIG
+// - expected_name: owner name of the answer when it differs from the query
+//   name; the default root name (".") acts as "same as the query name"
+void
+doFindTest(ZoneFinder& finder,
+           const isc::dns::Name& name,
+           const isc::dns::RRType& type,
+           const isc::dns::RRType& expected_type,
+           const isc::dns::RRTTL expected_ttl,
+           ZoneFinder::Result expected_result,
+           const std::vector<std::string>& expected_rdatas,
+           const std::vector<std::string>& expected_sig_rdatas,
+           const isc::dns::Name& expected_name = isc::dns::Name::ROOT_NAME(),
+           const ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT)
+{
+    SCOPED_TRACE("doFindTest " + name.toText() + " " + type.toText());
+    const ZoneFinder::FindResult result = finder.find(name, type, NULL,
+                                                      options);
+    ASSERT_EQ(expected_result, result.code) << name << " " << type;
+    if (!expected_rdatas.empty() && result.rrset) {
+        // An answer RRset is both expected and present: compare content.
+        checkRRset(result.rrset, expected_name != Name(".") ? expected_name :
+                   name, finder.getClass(), expected_type, expected_ttl,
+                   expected_rdatas);
+
+        if (!expected_sig_rdatas.empty() && result.rrset->getRRsig()) {
+            checkRRset(result.rrset->getRRsig(), expected_name != Name(".") ?
+                       expected_name : name, finder.getClass(),
+                       isc::dns::RRType::RRSIG(), expected_ttl,
+                       expected_sig_rdatas);
+        } else if (expected_sig_rdatas.empty()) {
+            // No RRSIG expected: there must not be one on the answer.
+            EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset->getRRsig());
+        } else {
+            ADD_FAILURE() << "Missing RRSIG";
+        }
+    } else if (expected_rdatas.empty()) {
+        // No answer expected: the result must carry no RRset.
+        EXPECT_EQ(isc::dns::RRsetPtr(), result.rrset);
+    } else {
+        ADD_FAILURE() << "Missing result";
+    }
+}
+
+// When asking for an RRset where RRs somehow have different TTLs, it should
+// convert to the lowest one.
+TEST_F(MockDatabaseClientTest, ttldiff) {
+    ZoneIteratorPtr it(this->client_->getIterator(Name("example.org")));
+    // Walk through the full iterator; we should see exactly one RRset with
+    // name ttldiff.example.org., carrying two RDATAs and the lower TTL.
+    const Name name("ttldiff.example.org.");
+    bool found = false;
+    ConstRRsetPtr rrset = it->getNextRRset();
+    while (rrset != ConstRRsetPtr()) {
+        if (rrset->getName() == name) {
+            ASSERT_FALSE(found);    // the name must appear only once
+            ASSERT_EQ(2, rrset->getRdataCount());
+            ASSERT_EQ(RRTTL(300), rrset->getTTL());
+            found = true;
+        }
+        rrset = it->getNextRRset();
+    }
+    ASSERT_TRUE(found);
+}
+
+// Unless we ask for individual RRs in our iterator request. In that case
+// every RR should go into its own 'rrset'.
+TEST_F(MockDatabaseClientTest, ttldiff_separate_rrs) {
+    ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), true));
+
+    // Walk through the full iterator; ttldiff.example.org. should now show
+    // up as two single-RR 'rrsets', one with TTL 300 and one with TTL 600.
+    Name name("ttldiff.example.org.");
+    bool found1 = false;        // was erroneously declared 'int'
+    bool found2 = false;        // was erroneously declared 'int'
+    ConstRRsetPtr rrset = it->getNextRRset();
+    while (rrset != ConstRRsetPtr()) {
+        if (rrset->getName() == name) {
+            ASSERT_EQ(1, rrset->getRdataCount());
+            // We should find 1 'rrset' with TTL 300 and one with TTL 600
+            if (rrset->getTTL() == RRTTL(300)) {
+                ASSERT_FALSE(found1);
+                found1 = true;
+            } else if (rrset->getTTL() == RRTTL(600)) {
+                ASSERT_FALSE(found2);
+                found2 = true;
+            } else {
+                FAIL() << "Found unexpected TTL: " <<
+                          rrset->getTTL().toText();
+            }
+        }
+        rrset = it->getNextRRset();
+    }
+    ASSERT_TRUE(found1);
+    ASSERT_TRUE(found2);
+}
+
+// Exercise the finder's basic find() behavior over a range of record
+// shapes: single and multiple RDATAs, CNAMEs, NXRRSET/NXDOMAIN, signed
+// data with RRSIGs, mixed-TTL RRsets, and various kinds of broken data.
+TYPED_TEST(DatabaseClientTest, find) {
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // A simple single-RDATA A lookup
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(*finder, isc::dns::Name("www.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // An RRset with multiple RDATAs
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    doFindTest(*finder, isc::dns::Name("www2.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // A different type at the same name
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::1");
+    this->expected_rdatas_.push_back("2001:db8::2");
+    doFindTest(*finder, isc::dns::Name("www.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_,
+               ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // A type not present at an existing name: NXRRSET
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("www.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::TXT(),
+               this->rrttl_,
+               ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // Asking for another type at a CNAME owner returns the CNAME
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("www.example.org.");
+    doFindTest(*finder, isc::dns::Name("cname.example.org."),
+               this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+               ZoneFinder::CNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // ...but asking for the CNAME itself is a plain SUCCESS
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("www.example.org.");
+    doFindTest(*finder, isc::dns::Name("cname.example.org."),
+               isc::dns::RRType::CNAME(), isc::dns::RRType::CNAME(),
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // A name not in the zone at all: NXDOMAIN
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("doesnotexist.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // Signed data: the covering RRSIGs should accompany the answer
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::1");
+    this->expected_rdatas_.push_back("2001:db8::2");
+    this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // NXRRSET at a signed name
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("signed1.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+               ZoneFinder::NXRRSET, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // A signed CNAME
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("www.example.org.");
+    this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signedcname1.example.org."),
+               this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+               ZoneFinder::CNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // The same checks for a second variant of signed data
+    // (signed2/signedcname2 in the test records)
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12346 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::2");
+    this->expected_rdatas_.push_back("2001:db8::1");
+    this->expected_sig_rdatas_.push_back("AAAA 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("signed2.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::TXT(), this->rrttl_,
+               ZoneFinder::NXRRSET, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("www.example.org.");
+    this->expected_sig_rdatas_.push_back("CNAME 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("signedcname2.example.org."),
+               this->qtype_, isc::dns::RRType::CNAME(), this->rrttl_,
+               ZoneFinder::CNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // Three acnamesig variants; all should return the signed A record
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("acnamesig1.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("acnamesig2.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("acnamesig3.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // ttldiff1/ttldiff2 contain RRs with differing TTLs; the expected
+    // answer TTL here is 360 (rather than the common rrttl_).
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    doFindTest(*finder, isc::dns::Name("ttldiff1.example.org."),
+               this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+               ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    doFindTest(*finder, isc::dns::Name("ttldiff2.example.org."),
+               this->qtype_, this->qtype_, isc::dns::RRTTL(360),
+               ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // Various kinds of broken data must surface as DataSourceError
+    EXPECT_THROW(finder->find(isc::dns::Name("badcname1.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badcname2.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badcname3.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badrdata.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badtype.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badttl.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("badsig.example.org."),
+                                              this->qtype_,
+                                              NULL, ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+
+    // Trigger the hardcoded exceptions and see if find() has cleaned up
+    if (this->is_mock_) {
+        EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.search."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     DataSourceError);
+        EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.search."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     isc::Exception);
+        EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.search."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     std::exception);
+        EXPECT_THROW(finder->find(isc::dns::Name("dsexception.in.getnext."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     DataSourceError);
+        EXPECT_THROW(finder->find(isc::dns::Name("iscexception.in.getnext."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     isc::Exception);
+        EXPECT_THROW(finder->find(isc::dns::Name("basicexception.in.getnext."),
+                                  this->qtype_,
+                                  NULL, ZoneFinder::FIND_DEFAULT),
+                     std::exception);
+    }
+
+    // This RRSIG has the wrong sigtype field, which should be
+    // an error if we decide to keep using that field
+    // Right now the field is ignored, so it does not error
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 20000201000000 12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("badsigtype.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, findDelegation) {
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // The apex should not be considered delegation point and we can access
+    // data
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(*finder, isc::dns::Name("example.org."),
+               this->qtype_, this->qtype_,
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("ns.example.com.");
+    this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+                                  "12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("example.org."),
+               isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // Check when we ask for something below delegation point, we get the NS
+    // (Both when the RRset there exists and doesn't)
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    this->expected_rdatas_.push_back("ns.example.com.");
+    this->expected_rdatas_.push_back("ns.delegation.example.org.");
+    this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 20000201000000 "
+                                  "12345 example.org. FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+               this->qtype_, isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("delegation.example.org."));
+    doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("delegation.example.org."));
+    doFindTest(*finder, isc::dns::Name("deep.below.delegation.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("delegation.example.org."));
+
+    // Even when we check directly at the delegation point, we should get
+    // the NS
+    doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // And when we ask direcly for the NS, we should still get delegation
+    doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+               isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // Now test delegation. If it is below the delegation point, we should get
+    // the DNAME (the one with data under DNAME is invalid zone, but we test
+    // the behaviour anyway just to make sure)
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("dname.example.com.");
+    this->expected_sig_rdatas_.clear();
+    this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+                                  "20000201000000 12345 example.org. "
+                                  "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+               this->qtype_, isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+    doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+    doFindTest(*finder, isc::dns::Name("really.deep.below.dname.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_, isc::dns::Name("dname.example.org."));
+
+    // Asking direcly for DNAME should give SUCCESS
+    doFindTest(*finder, isc::dns::Name("dname.example.org."),
+               isc::dns::RRType::DNAME(), isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // But we don't delegate at DNAME point
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("dname.example.org."),
+               this->qtype_, this->qtype_,
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+    this->expected_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("dname.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // This is broken dname, it contains two targets
+    EXPECT_THROW(finder->find(isc::dns::Name("below.baddname.example.org."),
+                              this->qtype_, NULL,
+                              ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+
+    // Broken NS - it lives together with something else
+    EXPECT_THROW(finder->find(isc::dns::Name("brokenns1.example.org."),
+                              this->qtype_, NULL,
+                              ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(isc::dns::Name("brokenns2.example.org."),
+                              this->qtype_, NULL,
+                              ZoneFinder::FIND_DEFAULT),
+                 DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, emptyDomain) {
+    shared_ptr<DatabaseClient::Finder> zone_finder(this->getFinder());
+
+    // "b.example.org." holds no records of its own, but a subdomain of it
+    // does exist in the zone.  The finder must therefore treat it as an
+    // empty non-terminal: the name "exists" but carries no RRsets, so the
+    // expected outcome is NXRRSET rather than NXDOMAIN.
+    doFindTest(*zone_finder, isc::dns::Name("b.example.org."), this->qtype_,
+               this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+// Glue-OK mode. Just go through NS delegations down (but not through
+// DNAME) and pretend the zone cut is not there.
+TYPED_TEST(DatabaseClientTest, glueOK) {
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // Below the delegation point, FIND_GLUE_OK looks the name up directly:
+    // an existing glue name without the requested type gives NXRRSET...
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               isc::dns::Name("ns.delegation.example.org."),
+               ZoneFinder::FIND_GLUE_OK);
+    // ...a name that doesn't exist at all gives NXDOMAIN...
+    doFindTest(*finder, isc::dns::Name("nothere.delegation.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               isc::dns::Name("nothere.delegation.example.org."),
+               ZoneFinder::FIND_GLUE_OK);
+    // ...and existing glue data is returned with SUCCESS.
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(*finder, isc::dns::Name("ns.delegation.example.org."),
+               this->qtype_, this->qtype_,
+               this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               isc::dns::Name("ns.delegation.example.org."),
+               ZoneFinder::FIND_GLUE_OK);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("ns.example.com.");
+    this->expected_rdatas_.push_back("ns.delegation.example.org.");
+    this->expected_sig_rdatas_.clear();
+    this->expected_sig_rdatas_.push_back("NS 5 3 3600 20000101000000 "
+                                   "20000201000000 12345 example.org. "
+                                   "FAKEFAKEFAKE");
+    // When we request the NS, it should be SUCCESS, not DELEGATION
+    // (different from the non-GLUE_OK case)
+    doFindTest(*finder, isc::dns::Name("delegation.example.org."),
+               isc::dns::RRType::NS(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               isc::dns::Name("delegation.example.org."),
+               ZoneFinder::FIND_GLUE_OK);
+    // GLUE_OK does not bypass DNAME: names below a DNAME still result
+    // in the DNAME answer.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("dname.example.com.");
+    this->expected_sig_rdatas_.clear();
+    this->expected_sig_rdatas_.push_back("DNAME 5 3 3600 20000101000000 "
+                                   "20000201000000 12345 example.org. "
+                                   "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+               this->qtype_, isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+    doFindTest(*finder, isc::dns::Name("below.dname.example.org."),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::DNAME(),
+               this->rrttl_, ZoneFinder::DNAME, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("dname.example.org."), ZoneFinder::FIND_GLUE_OK);
+}
+
+// Tests of wildcard matching: plain matches, cancellation by existing
+// names, interaction with delegation/DNAME/CNAME, and empty-nonterminal
+// asterisk labels.
+TYPED_TEST(DatabaseClientTest, wildcard) {
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // First, simple wildcard match
+    // Check also that the RRSIG is added from the wildcard (not modified)
+    this->expected_rdatas_.push_back("192.0.2.5");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_,
+               ZoneFinder::WILDCARD, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+    doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::WILDCARD,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    // A type the wildcard doesn't have yields WILDCARD_NXRRSET
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    doFindTest(*finder, isc::dns::Name("b.a.wild.example.org"),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // Direct request for this wildcard (an exact match, so SUCCESS)
+    this->expected_rdatas_.push_back("192.0.2.5");
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::NXRRSET, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+    // This is nonsense, but check it doesn't match by some stupid accident
+    doFindTest(*finder, isc::dns::Name("a.*.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    // These should be canceled, since it is below a domain which exists
+    doFindTest(*finder, isc::dns::Name("nothing.here.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    doFindTest(*finder,
+               isc::dns::Name("below.cancel.here.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    // And this should be just a plain empty non-terminal domain, check
+    // the wildcard doesn't hurt it
+    doFindTest(*finder, isc::dns::Name("here.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    // Also make sure that the wildcard doesn't hurt the original data
+    // below the wildcard
+    this->expected_rdatas_.push_back("2001:db8::5");
+    doFindTest(*finder, isc::dns::Name("cancel.here.wild.example.org"),
+               isc::dns::RRType::AAAA(), isc::dns::RRType::AAAA(),
+               this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+    this->expected_rdatas_.clear();
+
+    // How wildcards go together with delegation
+    this->expected_rdatas_.push_back("ns.example.com.");
+    doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+               this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+               ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("delegatedwild.example.org"));
+    // FIXME: This doesn't look logically OK, GLUE_OK should make it transparent,
+    // so the match should either work or be canceled, but return NXDOMAIN
+    doFindTest(*finder, isc::dns::Name("below.delegatedwild.example.org"),
+               this->qtype_, isc::dns::RRType::NS(), this->rrttl_,
+               ZoneFinder::DELEGATION, this->expected_rdatas_,
+               this->expected_sig_rdatas_,
+               isc::dns::Name("delegatedwild.example.org"),
+               ZoneFinder::FIND_GLUE_OK);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.5");
+    // These are direct matches (the asterisk labels are literal names here)
+    const char* positive_names[] = {
+        "wild.*.foo.example.org.",
+        "wild.*.foo.*.bar.example.org.",
+        NULL
+    };
+    for (const char** name(positive_names); *name != NULL; ++ name) {
+        doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_,
+                   this->expected_sig_rdatas_);
+    }
+
+    // These are wildcard matches against empty nonterminal asterisk
+    this->expected_rdatas_.clear();
+    const char* negative_names[] = {
+        "a.foo.example.org.",
+        "*.foo.example.org.",
+        "foo.example.org.",
+        "wild.bar.foo.example.org.",
+        "baz.foo.*.bar.example.org",
+        "baz.foo.baz.bar.example.org",
+        "*.foo.baz.bar.example.org",
+        "*.foo.*.bar.example.org",
+        "foo.*.bar.example.org",
+        "*.bar.example.org",
+        "bar.example.org",
+        NULL
+    };
+    // Unless FIND_DNSSEC is specified, this is no different from any other
+    // NXRRSET case.
+    for (const char** name(negative_names); *name != NULL; ++ name) {
+        doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+                   this->expected_rdatas_, this->expected_sig_rdatas_);
+    }
+
+    // With FIND_DNSSEC, it should result in WILDCARD_NXRRSET.
+    const char* negative_dnssec_names[] = {
+        "a.bar.example.org.",
+        "foo.baz.bar.example.org.",
+        "a.foo.bar.example.org.",
+        NULL
+    };
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("wild.*.foo.*.bar.example.org. NSEC");
+    this->expected_sig_rdatas_.clear();
+    for (const char** name(negative_dnssec_names); *name != NULL; ++ name) {
+        doFindTest(*finder, isc::dns::Name(*name), this->qtype_,
+                   RRType::NSEC(), this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+                   this->expected_rdatas_, this->expected_sig_rdatas_,
+                   Name("bao.example.org."), ZoneFinder::FIND_DNSSEC);
+    }
+
+    // CNAME on a wildcard.  Maybe not so common, but not disallowed.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("www.example.org.");
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, isc::dns::Name("a.cnamewild.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::CNAME(),
+               this->rrttl_, ZoneFinder::WILDCARD_CNAME,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+
+    // DNAME on a wildcard.  In our implementation we ignore DNAMEs on a
+    // wildcard, but at a higher level we say the behavior is "unspecified".
+    // rfc2672bis strongly discourages the mixture of DNAME and wildcard
+    // (with SHOULD NOT).
+    this->expected_rdatas_.clear();
+    this->expected_sig_rdatas_.clear();
+    doFindTest(*finder, Name("a.dnamewild.example.org."),
+               this->qtype_, this->qtype_, this->rrttl_,
+               ZoneFinder::WILDCARD_NXRRSET, this->expected_rdatas_,
+               this->expected_sig_rdatas_);
+
+    // Some strange things in the wild node: an NS there acts as delegation
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("ns.example.com.");
+    doFindTest(*finder, isc::dns::Name("a.nswild.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NS(),
+               this->rrttl_, ZoneFinder::DELEGATION,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, noWildcard) {
+    // Tests with the NO_WILDCARD flag: wildcard substitution is suppressed.
+
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // This would match *.wild.example.org, but with NO_WILDCARD should
+    // result in NXDOMAIN.
+    this->expected_rdatas_.push_back("cancel.here.wild.example.org. A "
+                                     "NSEC RRSIG");
+    this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+               RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+               ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+               this->expected_sig_rdatas_, Name("*.wild.example.org."),
+               ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+    // Should be the same without FIND_DNSSEC (but in this case no RRsets
+    // will be returned)
+    doFindTest(*finder, isc::dns::Name("a.wild.example.org"),
+               RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+               ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+               this->empty_rdatas_, Name::ROOT_NAME(), // name is dummy
+               ZoneFinder::NO_WILDCARD);
+
+    // Same for a wildcard empty non-terminal.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("brokenns1.example.org. A NSEC");
+    doFindTest(*finder, isc::dns::Name("a.bar.example.org"),
+               RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+               ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+               this->empty_rdatas_, Name("wild.*.foo.*.bar.example.org"),
+               ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+    // Search for a wildcard name with NO_WILDCARD.  There should be no
+    // difference.  This is, for example, necessary to prove nonexistence
+    // of a matching wildcard for isnx.nonterminal.example.org.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+    doFindTest(*finder, isc::dns::Name("*.nonterminal.example.org"),
+               RRType::NSEC(), RRType::NSEC(), this->rrttl_,
+               ZoneFinder::NXDOMAIN, this->expected_rdatas_,
+               this->empty_rdatas_, Name("l.example.org"),
+               ZoneFinder::FIND_DNSSEC | ZoneFinder::NO_WILDCARD);
+
+    // On the other hand, if there's an exact match for the wildcard name
+    // it should be found regardless of NO_WILDCARD.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.5");
+    this->expected_sig_rdatas_.clear();
+    this->expected_sig_rdatas_.push_back("A 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("*.wild.example.org"),
+               this->qtype_, this->qtype_, this->rrttl_,
+               ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->expected_sig_rdatas_, Name("*.wild.example.org"),
+               ZoneFinder::NO_WILDCARD);
+}
+
+TYPED_TEST(DatabaseClientTest, NXRRSET_NSEC) {
+    // The queried name exists but lacks the requested type (TXT), so in
+    // DNSSEC mode the finder should answer NXRRSET together with the
+    // name's own NSEC record proving the type's absence.
+    shared_ptr<DatabaseClient::Finder> zone_finder(this->getFinder());
+
+    this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+    this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*zone_finder, isc::dns::Name("www.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+               this->rrttl_, ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, wildcardNXRRSET_NSEC) {
+    // The domain matches via wildcard, but doesn't have this RRType,
+    // so we should get its NSEC.
+    //
+    // The user will have to query us again to get the correct
+    // answer (e.g. to prove there's no exact match)
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    this->expected_rdatas_.push_back("cancel.here.wild.example.org. A NSEC "
+                                     "RRSIG");
+    this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    // Note that the NSEC name should NOT be synthesized from the wildcard.
+    doFindTest(*finder, isc::dns::Name("a.wild.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+               this->rrttl_, ZoneFinder::WILDCARD_NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               Name("*.wild.example.org"), ZoneFinder::FIND_DNSSEC);
+}
+
+TYPED_TEST(DatabaseClientTest, NXDOMAIN_NSEC) {
+    // The domain doesn't exist, so we must get the right covering NSEC
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    this->expected_rdatas_.push_back("www2.example.org. A AAAA NSEC RRSIG");
+    this->expected_sig_rdatas_.push_back("NSEC 5 3 3600 20000101000000 "
+                                         "20000201000000 12345 example.org. "
+                                         "FAKEFAKEFAKE");
+    doFindTest(*finder, isc::dns::Name("www1.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+               this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               Name("www.example.org."), ZoneFinder::FIND_DNSSEC);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("acnamesig1.example.org. NS A NSEC RRSIG");
+    // This tests it works correctly in the apex (there was a bug where a
+    // check for NS-alone was there and it would throw).
+    doFindTest(*finder, isc::dns::Name("aa.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(),
+               this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               Name("example.org."), ZoneFinder::FIND_DNSSEC);
+
+    // Check that if the DB doesn't support it, the exception from there
+    // is not propagated and the answer simply omits the NSEC
+    if (!this->is_mock_) {
+        return; // We don't make the real DB throw
+    }
+    EXPECT_NO_THROW(doFindTest(*finder,
+                               isc::dns::Name("notimplnsec.example.org."),
+                               isc::dns::RRType::TXT(),
+                               isc::dns::RRType::NSEC(), this->rrttl_,
+                               ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+                               this->empty_rdatas_, Name::ROOT_NAME(),
+                               ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, emptyNonterminalNSEC) {
+    // Same as NXDOMAIN_NSEC, but with an empty non-terminal
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    this->expected_rdatas_.push_back("empty.nonterminal.example.org. NSEC");
+    doFindTest(*finder, isc::dns::Name("nonterminal.example.org."),
+               isc::dns::RRType::TXT(), isc::dns::RRType::NSEC(), this->rrttl_,
+               ZoneFinder::NXRRSET,
+               this->expected_rdatas_, this->expected_sig_rdatas_,
+               Name("l.example.org."), ZoneFinder::FIND_DNSSEC);
+
+    // Check that if the DB doesn't support it, the exception from there
+    // is not propagated and the answer simply omits the NSEC
+    if (!this->is_mock_) {
+        return; // We don't make the real DB throw
+    }
+    EXPECT_NO_THROW(doFindTest(*finder,
+                               isc::dns::Name("here.wild.example.org."),
+                               isc::dns::RRType::TXT(),
+                               isc::dns::RRType::NSEC(),
+                               this->rrttl_, ZoneFinder::NXRRSET,
+                               this->empty_rdatas_, this->empty_rdatas_,
+                               Name::ROOT_NAME(), ZoneFinder::FIND_DNSSEC));
+}
+
+TYPED_TEST(DatabaseClientTest, getOrigin) {
+    // Look up the zone and check the resulting finder reports the expected
+    // origin (and, for the mock back end, the expected read-only zone id).
+    DataSourceClient::FindResult
+        find_result(this->client_->findZone(Name("example.org")));
+    ASSERT_EQ(result::SUCCESS, find_result.code);
+    shared_ptr<DatabaseClient::Finder> zone_finder(
+        dynamic_pointer_cast<DatabaseClient::Finder>(find_result.zone_finder));
+    if (this->is_mock_) {
+        // Only the mock accessor exposes a fixed, checkable zone id.
+        EXPECT_EQ(READONLY_ZONE_ID, zone_finder->zone_id());
+    }
+    EXPECT_EQ(this->zname_, zone_finder->getOrigin());
+}
+
+// Checks the finder obtained from a zone updater: without replacing the
+// zone it behaves like a normal finder; with replace it sees an empty zone.
+TYPED_TEST(DatabaseClientTest, updaterFinder) {
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    ASSERT_TRUE(this->updater_);
+
+    // If this update isn't replacing the zone, the finder should work
+    // just like the normal find() case.
+    if (this->is_mock_) {
+        DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+            this->updater_->getFinder());
+        EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+    }
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(this->updater_->getFinder(), this->qname_,
+               this->qtype_, this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->empty_rdatas_);
+
+    // When replacing the zone, the updater's finder shouldn't see anything
+    // in the zone until something is added.
+    this->updater_.reset();
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    ASSERT_TRUE(this->updater_);
+    if (this->is_mock_) {
+        DatabaseClient::Finder& finder = dynamic_cast<DatabaseClient::Finder&>(
+            this->updater_->getFinder());
+        EXPECT_EQ(WRITABLE_ZONE_ID, finder.zone_id());
+    }
+    doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+               this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->empty_rdatas_, this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, flushZone) {
+    // A simple update case: flush (replace) the entire zone and commit
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    // Before update, the name exists.
+    EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+                                                this->qtype_).code);
+
+    // start update in the replace mode.  the normal finder should still
+    // be able to see the record, but the updater's finder shouldn't.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->setUpdateAccessor();
+    EXPECT_EQ(ZoneFinder::SUCCESS,
+              finder->find(this->qname_, this->qtype_).code);
+    EXPECT_EQ(ZoneFinder::NXDOMAIN,
+              this->updater_->getFinder().find(this->qname_,
+                                               this->qtype_).code);
+
+    // commit the update.  now the normal finder shouldn't see it either.
+    this->updater_->commit();
+    EXPECT_EQ(ZoneFinder::NXDOMAIN, finder->find(this->qname_,
+                                                 this->qtype_).code);
+
+    // Check rollback wasn't accidentally performed.
+    EXPECT_FALSE(this->isRollbacked());
+}
+
+TYPED_TEST(DatabaseClientTest, updateCancel) {
+    // similar to the previous test, but destruct the updater before commit,
+    // which should cancel (roll back) the update.
+
+    ZoneFinderPtr finder = this->client_->findZone(this->zname_).zone_finder;
+    EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+                                                this->qtype_).code);
+
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->setUpdateAccessor();
+    EXPECT_EQ(ZoneFinder::NXDOMAIN,
+              this->updater_->getFinder().find(this->qname_,
+                                               this->qtype_).code);
+    // DB should not have been rolled back yet.
+    EXPECT_FALSE(this->isRollbacked());
+    this->updater_.reset();            // destruct without commit
+
+    // reset() should have triggered rollback (although it doesn't affect
+    // anything in the mock accessor implementation except for the result of
+    // isRollbacked())
+    EXPECT_TRUE(this->isRollbacked(true));
+    EXPECT_EQ(ZoneFinder::SUCCESS, finder->find(this->qname_,
+                                                this->qtype_).code);
+}
+
+// An exception thrown from the accessor's rollback must not escape the
+// updater's destructor.
+TYPED_TEST(DatabaseClientTest, exceptionFromRollback) {
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+
+    this->rrset_.reset(new RRset(Name("throw.example.org"), this->qclass_,
+                                 this->qtype_, this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.1"));
+    this->updater_->addRRset(*this->rrset_);
+    // destruct without commit.  The added name will result in an exception
+    // in the MockAccessor's rollback method.  It shouldn't be propagated.
+    EXPECT_NO_THROW(this->updater_.reset());
+}
+
+TYPED_TEST(DatabaseClientTest, duplicateCommit) {
+    // A second commit on the same updater should result in an exception.
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->commit();
+    EXPECT_THROW(this->updater_->commit(), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToNewZone) {
+    // Add a single RRset to a fresh empty zone
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->addRRset(*this->rrset_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.2");
+    {
+        SCOPED_TRACE("add RRset");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+
+    // Similar to the previous case, but with RRSIG
+    this->updater_.reset();
+    this->updater_ = this->client_->getUpdater(this->zname_, true);
+    this->updater_->addRRset(*this->rrset_);
+    this->updater_->addRRset(*this->rrsigset_);
+
+    // confirm the expected columns were passed to the accessor (if checkable).
+    const char* const rrsig_added[] = {
+        "www.example.org.", "org.example.www.", "3600", "RRSIG", "A",
+        "A 5 3 0 20000101000000 20000201000000 0 example.org. FAKEFAKEFAKE"
+    };
+    this->checkLastAdded(rrsig_added);
+
+    this->expected_sig_rdatas_.clear();
+    this->expected_sig_rdatas_.push_back(
+        rrsig_added[DatabaseAccessor::ADD_RDATA]);
+    {
+        SCOPED_TRACE("add RRset with RRSIG");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->expected_sig_rdatas_);
+    }
+
+    // Add the non-RRSIG RRset again, to see whether the previous attempt
+    // of adding the RRSIG causes any unexpected effect; in particular,
+    // whether the SIGTYPE field might remain set.
+    this->updater_->addRRset(*this->rrset_);
+    const char* const rrset_added[] = {
+        "www.example.org.", "org.example.www.", "3600", "A", "", "192.0.2.2"
+    };
+    this->checkLastAdded(rrset_added);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetToCurrentZone) {
+    // Similar to the previous test, but not replacing the existing data.
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->addRRset(*this->rrset_);
+
+    // We should see both old and new data.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    {
+        SCOPED_TRACE("add RRset");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+    // After commit, the normal (read-only) finder sees the merged data too.
+    this->updater_->commit();
+    {
+        SCOPED_TRACE("add RRset after commit");
+        doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+                   this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+                   this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addMultipleRRs) {
+    // Similar to the previous case, but the added RRset contains multiple
+    // RRs.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    // rrset_ already holds 192.0.2.2 (from the fixture); adding another
+    // rdata makes it a multi-RR RRset.
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.3"));
+    this->updater_->addRRset(*this->rrset_);
+    // The result should contain the pre-existing RR plus both added RRs.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    this->expected_rdatas_.push_back("192.0.2.3");
+    {
+        SCOPED_TRACE("add multiple RRs");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfLargerTTL) {
+    // Similar to the previous one, but the TTL of the added RRset is larger
+    // than that of the existing record.  The finder should use the smaller
+    // one.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_->setTTL(RRTTL(7200));
+    this->updater_->addRRset(*this->rrset_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    {
+        SCOPED_TRACE("add RRset of larger TTL");
+        // Note: the expected TTL passed here is the fixture's rrttl_ (3600),
+        // i.e. the smaller, pre-existing TTL, not the 7200 just added.
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetOfSmallerTTL) {
+    // Similar to the previous one, but the added RRset has a smaller TTL.
+    // The added TTL should be used by the finder.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_->setTTL(RRTTL(1800));
+    this->updater_->addRRset(*this->rrset_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    {
+        SCOPED_TRACE("add RRset of smaller TTL");
+        // Expect the newly added 1800, not the original rrttl_.
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, RRTTL(1800), ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addSameRR) {
+    // Add the same RR as that is already in the data source.
+    // Currently the add interface doesn't try to suppress the duplicate,
+    // neither does the finder.  We may want to revisit it in future versions.
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    // Rebuild rrset_ so it contains only the rdata that already exists.
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.1"));
+    this->updater_->addRRset(*this->rrset_);
+    // The duplicate is returned twice: no deduplication takes place.
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.1");
+    {
+        SCOPED_TRACE("add same RR");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addDeviantRR) {
+    // Exercise addRRset() with RRsets that deviate from the zone being
+    // updated: a different RR class and an out-of-zone owner name.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+    // RR class mismatch.  This should be detected and rejected.
+    this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "test text"));
+    EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+
+    // Out-of-zone owner name.  At a higher level this should be rejected,
+    // but it doesn't happen in this interface.
+    this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+                                 this->qtype_, this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.100"));
+    this->updater_->addRRset(*this->rrset_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.100");
+    {
+        // Note: with the find() implementation being more strict about
+        // zone cuts, this test may fail.  Then the test should be updated.
+        SCOPED_TRACE("add out-of-zone RR");
+        doFindTest(this->updater_->getFinder(), Name("example.com"),
+                   this->qtype_, this->qtype_, this->rrttl_,
+                   ZoneFinder::SUCCESS, this->expected_rdatas_,
+                   this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, addEmptyRRset) {
+    // An RRset without any rdata cannot be added; it must be rejected.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+                                 this->rrttl_));
+    EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addAfterCommit) {
+    // Once the update transaction has been committed, any further add
+    // attempt on the same updater must be rejected.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->commit();
+    EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, addRRsetWithRRSIG) {
+    // An RRset that carries an attached RRSIG must be rejected by add;
+    // the RRSIG has to be added as its own RRset.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_->addRRsig(*this->rrsigset_);
+    EXPECT_THROW(this->updater_->addRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRset) {
+    // Delete existing records and confirm visibility before/after commit.
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.1"));
+
+    // Delete one RR from an RRset
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+
+    // Delete the only RR of a name
+    this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+                                 RRType::CNAME(), this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "www.example.org"));
+    this->updater_->deleteRRset(*this->rrset_);
+
+    // The this->updater_ finder should immediately see the deleted results.
+    {
+        SCOPED_TRACE("delete RRset");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+                   this->empty_rdatas_, this->empty_rdatas_);
+        doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+                   this->qtype_, this->qtype_, this->rrttl_,
+                   ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+                   this->empty_rdatas_);
+    }
+
+    // before committing the change, the original finder should see the
+    // original record.
+    {
+        SCOPED_TRACE("delete RRset before commit");
+        this->expected_rdatas_.push_back("192.0.2.1");
+        doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+                   this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+                   this->empty_rdatas_);
+
+        this->expected_rdatas_.clear();
+        this->expected_rdatas_.push_back("www.example.org.");
+        doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+                   RRType::CNAME(), this->rrttl_, ZoneFinder::CNAME,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+
+    // once committed, the record should be removed from the original finder's
+    // view, too.
+    this->updater_->commit();
+    {
+        SCOPED_TRACE("delete RRset after commit");
+        doFindTest(*finder, this->qname_, this->qtype_, this->qtype_,
+                   this->rrttl_, ZoneFinder::NXRRSET, this->empty_rdatas_,
+                   this->empty_rdatas_);
+        doFindTest(*finder, Name("cname.example.org"), this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::NXDOMAIN,
+                   this->empty_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetToNXDOMAIN) {
+    // similar to the previous case, but it removes the only record of the
+    // given name.  a subsequent find() should result in NXDOMAIN.
+    this->rrset_.reset(new RRset(Name("cname.example.org"), this->qclass_,
+                                 RRType::CNAME(), this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "www.example.org"));
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+    {
+        SCOPED_TRACE("delete RRset to NXDOMAIN");
+        doFindTest(this->updater_->getFinder(), Name("cname.example.org"),
+                   this->qtype_, this->qtype_, this->rrttl_,
+                   ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+                   this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteMultipleRRs) {
+    // Delete an RRset containing two RRs; the whole set must disappear,
+    // leaving NXRRSET for that type (other types at the name remain).
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::1"));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::2"));
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+
+    {
+        SCOPED_TRACE("delete multiple RRs");
+        doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+                   this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+                   this->empty_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, partialDelete) {
+    // One of the two rdatas in the delete request matches stored data,
+    // the other does not; only the matching one should be removed.
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, RRType::AAAA(),
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::1"));
+    // This does not exist in the test data source:
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::3"));
+
+    // deleteRRset should succeed "silently", and subsequent find() should
+    // find the remaining RR.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+    {
+        SCOPED_TRACE("partial delete");
+        this->expected_rdatas_.push_back("2001:db8::2");
+        doFindTest(this->updater_->getFinder(), this->qname_, RRType::AAAA(),
+                   RRType::AAAA(), this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteNoMatch) {
+    // similar to the previous test, but there's not even a match in the
+    // specified RRset.  Essentially there's no difference in the result.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+    {
+        SCOPED_TRACE("delete no match");
+        // The stored record survives the no-op delete.
+        this->expected_rdatas_.push_back("192.0.2.1");
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+                   this->expected_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteWithDifferentTTL) {
+    // Our delete interface simply ignores TTL (may change in a future version)
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+                                 RRTTL(1800)));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.1"));
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->deleteRRset(*this->rrset_);
+    {
+        SCOPED_TRACE("delete RRset with a different TTL");
+        // Despite the TTL mismatch (1800 vs stored 3600) the RR is deleted.
+        doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+                   this->qtype_, this->rrttl_, ZoneFinder::NXRRSET,
+                   this->empty_rdatas_, this->empty_rdatas_);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, deleteDeviantRR) {
+    // Counterpart of addDeviantRR for the delete interface.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+    // RR class mismatch.  This should be detected and rejected.
+    this->rrset_.reset(new RRset(this->qname_, RRClass::CH(), RRType::TXT(),
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "test text"));
+    EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+
+    // Out-of-zone owner name.  At a higher level this should be rejected,
+    // but it doesn't happen in this interface.
+    this->rrset_.reset(new RRset(Name("example.com"), this->qclass_,
+                                 this->qtype_, this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.100"));
+    EXPECT_NO_THROW(this->updater_->deleteRRset(*this->rrset_));
+}
+
+TYPED_TEST(DatabaseClientTest, deleteAfterCommit) {
+    // Once the update transaction has been committed, any further delete
+    // attempt on the same updater must be rejected.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->updater_->commit();
+    EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteEmptyRRset) {
+    // An RRset without any rdata cannot be used for delete; it is rejected.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_.reset(new RRset(this->qname_, this->qclass_, this->qtype_,
+                                 this->rrttl_));
+    EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, deleteRRsetWithRRSIG) {
+    // As with add, an RRset carrying an attached RRSIG is rejected by delete.
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+    this->rrset_->addRRsig(*this->rrsigset_);
+    EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_), DataSourceError);
+}
+
+TYPED_TEST(DatabaseClientTest, compoundUpdate) {
+    // This test case performs an arbitrary chosen add/delete operations
+    // in a single update transaction.  Essentially there is nothing new to
+    // test here, but there may be some bugs that was overlooked and can
+    // only happen in the compound update scenario, so we test it.
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false);
+
+    // add a new RR to an existing RRset
+    this->updater_->addRRset(*this->rrset_);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.1");
+    this->expected_rdatas_.push_back("192.0.2.2");
+    doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+               this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->empty_rdatas_);
+
+    // delete an existing RR
+    this->rrset_.reset(new RRset(Name("www.example.org"), this->qclass_,
+                                 this->qtype_, this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "192.0.2.1"));
+    this->updater_->deleteRRset(*this->rrset_);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.2");
+    doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+               this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->empty_rdatas_);
+
+    // re-add it
+    this->updater_->addRRset(*this->rrset_);
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(this->updater_->getFinder(), this->qname_, this->qtype_,
+               this->qtype_, this->rrttl_, ZoneFinder::SUCCESS,
+               this->expected_rdatas_, this->empty_rdatas_);
+
+    // add a new RR with a new name
+    const Name newname("newname.example.org");
+    const RRType newtype(RRType::AAAA());
+    // (sanity check: the new name must not exist yet)
+    doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+               this->rrttl_, ZoneFinder::NXDOMAIN, this->empty_rdatas_,
+               this->empty_rdatas_);
+    this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::10"));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::11"));
+    this->updater_->addRRset(*this->rrset_);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::10");
+    this->expected_rdatas_.push_back("2001:db8::11");
+    doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->empty_rdatas_);
+
+    // delete one RR from the previous set
+    this->rrset_.reset(new RRset(newname, this->qclass_, newtype,
+                                 this->rrttl_));
+    this->rrset_->addRdata(rdata::createRdata(this->rrset_->getType(),
+                                              this->rrset_->getClass(),
+                                              "2001:db8::11"));
+    this->updater_->deleteRRset(*this->rrset_);
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::10");
+    doFindTest(this->updater_->getFinder(), newname, newtype, newtype,
+               this->rrttl_, ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->empty_rdatas_);
+
+    // Commit the changes, confirm the entire changes applied.
+    this->updater_->commit();
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("192.0.2.2");
+    this->expected_rdatas_.push_back("192.0.2.1");
+    doFindTest(*finder, this->qname_, this->qtype_, this->qtype_, this->rrttl_,
+               ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->empty_rdatas_);
+
+    this->expected_rdatas_.clear();
+    this->expected_rdatas_.push_back("2001:db8::10");
+    doFindTest(*finder, newname, newtype, newtype, this->rrttl_,
+               ZoneFinder::SUCCESS, this->expected_rdatas_,
+               this->empty_rdatas_);
+}
+
+TYPED_TEST(DatabaseClientTest, previous) {
+    // Tests for findPreviousName(), used for NSEC processing.
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    EXPECT_EQ(Name("www.example.org."),
+              finder->findPreviousName(Name("www2.example.org.")));
+    // Check a name that doesn't exist there
+    EXPECT_EQ(Name("www.example.org."),
+              finder->findPreviousName(Name("www1.example.org.")));
+    if (this->is_mock_) { // We can't really force the DB to throw
+        // Check it doesn't crash or anything if the underlying DB throws
+        DataSourceClient::FindResult
+            zone(this->client_->findZone(Name("bad.example.org")));
+        finder =
+            dynamic_pointer_cast<DatabaseClient::Finder>(zone.zone_finder);
+
+        EXPECT_THROW(finder->findPreviousName(Name("bad.example.org")),
+                     isc::NotImplemented);
+    } else {
+        // This branch runs against the real backends only; the mock case
+        // above merely checks that the underlying exception propagates.
+
+        // A name before the origin
+        EXPECT_THROW(finder->findPreviousName(Name("example.com")),
+                     isc::NotImplemented);
+    }
+}
+
+TYPED_TEST(DatabaseClientTest, invalidRdata) {
+    // Broken rdata stored in the backend must surface as DataSourceError
+    // from find(), not crash or be silently ignored.
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    EXPECT_THROW(finder->find(Name("invalidrdata.example.org."), RRType::A()),
+                 DataSourceError);
+    EXPECT_THROW(finder->find(Name("invalidrdata2.example.org."), RRType::A()),
+                 DataSourceError);
+}
+
+TEST_F(MockDatabaseClientTest, missingNSEC) {
+    // A DNSSEC lookup on a name whose NSEC record is missing from the
+    // (supposedly signed) mock data.
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    /*
+     * FIXME: For now, we can't really distinguish this bogus input
+     * from not-signed zone so we can't throw. But once we can,
+     * enable the original test.
+     */
+#if 0
+    EXPECT_THROW(finder->find(Name("badnsec2.example.org."), RRType::A(), NULL,
+                              ZoneFinder::FIND_DNSSEC),
+                 DataSourceError);
+#endif
+    doFindTest(*finder, Name("badnsec2.example.org."), RRType::A(),
+               RRType::A(), this->rrttl_, ZoneFinder::NXDOMAIN,
+               this->expected_rdatas_, this->expected_sig_rdatas_);
+}
+
+TEST_F(MockDatabaseClientTest, badName) {
+    // A malformed name coming back from the database should be reported
+    // as DataSourceError by findPreviousName().
+    shared_ptr<DatabaseClient::Finder> finder(this->getFinder());
+
+    EXPECT_THROW(finder->findPreviousName(Name("brokenname.example.org.")),
+                 DataSourceError);
+}
+
+/*
+ * Test correct use of the updater with a journal.
+ */
+TYPED_TEST(DatabaseClientTest, journal) {
+    // A single IXFR-style update: delete old SOA + old RR, add new SOA +
+    // RR, then check the four corresponding journal entries were recorded.
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+    this->updater_->deleteRRset(*this->soa_);
+    this->updater_->deleteRRset(*this->rrset_);
+    // Build the replacement SOA with an incremented serial (1234 -> 1235).
+    this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                               this->rrttl_));
+    this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                            this->soa_->getClass(),
+                                            "ns1.example.org. "
+                                            "admin.example.org. "
+                                            "1235 3600 1800 2419200 7200"));
+    this->updater_->addRRset(*this->soa_);
+    this->updater_->addRRset(*this->rrset_);
+    ASSERT_NO_THROW(this->updater_->commit());
+    // Deletions are keyed by the starting serial, additions by the new one.
+    std::vector<JournalEntry> expected;
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                    DatabaseAccessor::DIFF_DELETE,
+                                    "example.org.", "SOA", "3600",
+                                    "ns1.example.org. admin.example.org. "
+                                    "1234 3600 1800 2419200 7200"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                    DatabaseAccessor::DIFF_DELETE,
+                                    "www.example.org.", "A", "3600",
+                                    "192.0.2.2"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+                                    DatabaseAccessor::DIFF_ADD,
+                                    "example.org.", "SOA", "3600",
+                                    "ns1.example.org. admin.example.org. "
+                                    "1235 3600 1800 2419200 7200"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+                                    DatabaseAccessor::DIFF_ADD,
+                                    "www.example.org.", "A", "3600",
+                                    "192.0.2.2"));
+    this->checkJournal(expected);
+}
+
+/*
+ * Push multiple delete-add sequences. Checks it is allowed and all is
+ * saved.
+ */
+TYPED_TEST(DatabaseClientTest, journalMultiple) {
+    // Push 99 consecutive delete-SOA/add-SOA pairs in one transaction and
+    // verify every pair ends up in the journal in order.
+    std::vector<JournalEntry> expected;
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+    std::string soa_rdata = "ns1.example.org. admin.example.org. "
+        "1234 3600 1800 2419200 7200";
+    for (size_t i(1); i < 100; ++ i) {
+        // Remove the old SOA
+        this->updater_->deleteRRset(*this->soa_);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
+                                        DatabaseAccessor::DIFF_DELETE,
+                                        "example.org.", "SOA", "3600",
+                                        soa_rdata));
+        // Create a new SOA
+        soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+        this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                   this->rrttl_));
+        this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                                this->soa_->getClass(),
+                                                soa_rdata));
+        // Add the new SOA
+        this->updater_->addRRset(*this->soa_);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
+                                        DatabaseAccessor::DIFF_ADD,
+                                        "example.org.", "SOA", "3600",
+                                        soa_rdata));
+    }
+    ASSERT_NO_THROW(this->updater_->commit());
+    // Check the journal contains everything.
+    this->checkJournal(expected);
+}
+
+/*
+ * Test passing a forbidden sequence to it and expect it to throw.
+ *
+ * Note that we implicitly test in different testcases (these for add and
+ * delete) that if the journaling is false, it doesn't expect the order.
+ *
+ * In this test we don't check with the real databases as this case shouldn't
+ * contain backend specific behavior.
+ */
+TEST_F(MockDatabaseClientTest, journalBadSequence) {
+    // Each scope below starts a fresh journaling updater and violates the
+    // required delete-SOA / add-SOA ordering in a different way.
+    std::vector<JournalEntry> expected;
+    {
+        SCOPED_TRACE("Delete A before SOA");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
+                     isc::BadValue);
+        // Make sure the journal is empty now
+        this->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add before delete");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+        // Make sure the journal is empty now
+        this->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add A before SOA");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+        // But we miss the add SOA here
+        EXPECT_THROW(this->updater_->addRRset(*this->rrset_), isc::BadValue);
+        // Make sure the journal contains only the first one
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                        DatabaseAccessor::DIFF_DELETE,
+                                        "example.org.", "SOA", "3600",
+                                        "ns1.example.org. admin.example.org. "
+                                        "1234 3600 1800 2419200 7200"));
+        this->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Commit before add");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+        // Commit at the wrong time
+        EXPECT_THROW(this->updater_->commit(), isc::BadValue);
+        this->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Delete two SOAs");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+        // Delete the SOA again
+        EXPECT_THROW(this->updater_->deleteRRset(*this->soa_), isc::BadValue);
+        this->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add two SOAs");
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
+        // Still OK
+        EXPECT_NO_THROW(this->updater_->addRRset(*this->soa_));
+        // But this one is added again
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                        DatabaseAccessor::DIFF_ADD,
+                                        "example.org.", "SOA", "3600",
+                                        "ns1.example.org. admin.example.org. "
+                                        "1234 3600 1800 2419200 7200"));
+        this->checkJournal(expected);
+    }
+}
+
+/*
+ * Test it rejects to store journals when we request it together with
+ * erasing the whole zone.
+ */
+TYPED_TEST(DatabaseClientTest, journalOnErase) {
+    // Requesting a journal (3rd arg) together with replace/erase (2nd arg)
+    // is a contradictory combination and must be rejected up front.
+    EXPECT_THROW(this->client_->getUpdater(this->zname_, true, true),
+                 isc::BadValue);
+}
+
+/*
+ * Check that exception is propagated when the journal is not implemented.
+ */
+TEST_F(MockDatabaseClientTest, journalNotImplemented) {
+    // The "null.example.org" zone of the mock accessor has no journal
+    // support; both delete and add must propagate isc::NotImplemented.
+    updater_ = client_->getUpdater(Name("null.example.org"), false, true);
+    EXPECT_THROW(updater_->deleteRRset(*soa_), isc::NotImplemented);
+    soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+    soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                      "ns1.example.org. admin.example.org. "
+                                      "1234 3600 1800 2419201 7200"));
+    EXPECT_THROW(updater_->addRRset(*soa_), isc::NotImplemented);
+}
+
+/*
+ * Test that different exceptions are propagated.
+ */
+TEST_F(MockDatabaseClientTest, journalException) {
+    // The mock "bad.example.org" zone makes the accessor throw; the error
+    // must surface as DataSourceError from the updater.
+    updater_ = client_->getUpdater(Name("bad.example.org"), false, true);
+    EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
+}
+
+//
+// Tests for the ZoneJournalReader
+//
+
+// Install a simple, commonly used diff sequence: making an update from one
+// SOA to another.  Return the end SOA RRset for the convenience of the caller.
+ConstRRsetPtr
+makeSimpleDiff(DataSourceClient& client, const Name& zname,
+               const RRClass& rrclass, ConstRRsetPtr begin_soa)
+{
+    // Open the zone for update with journaling enabled (no wipe).
+    ZoneUpdaterPtr diff_updater(client.getUpdater(zname, false, true));
+
+    // Retire the starting SOA, then build and install its successor
+    // (serial 1235).
+    diff_updater->deleteRRset(*begin_soa);
+    RRsetPtr end_soa(new RRset(zname, rrclass, RRType::SOA(), RRTTL(3600)));
+    end_soa->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
+                                         "ns1.example.org. admin.example.org. "
+                                         "1235 3600 1800 2419200 7200"));
+    diff_updater->addRRset(*end_soa);
+    diff_updater->commit();
+
+    // Hand the end SOA back so the caller can verify the diff sequence.
+    return (end_soa);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReader) {
+    // Check the simple case made by makeSimpleDiff.
+    ConstRRsetPtr soa_end = makeSimpleDiff(*this->client_, this->zname_,
+                                           this->qclass_, this->soa_);
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1234, 1235);
+    EXPECT_EQ(ZoneJournalReader::SUCCESS, result.first);
+    ZoneJournalReaderPtr jnl_reader = result.second;
+    ASSERT_TRUE(jnl_reader);
+    ConstRRsetPtr rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(this->soa_, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(soa_end, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_FALSE(rrset);
+
+    // Once it reaches the end of the sequence, further read attempt will
+    // result in exception.
+    EXPECT_THROW(jnl_reader->getNextDiff(), isc::InvalidOperation);
+}
+
+TYPED_TEST(DatabaseClientTest, readLargeJournal) {
+    // Similar to journalMultiple, but checks it at a higher level.
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+
+    vector<ConstRRsetPtr> expected;
+    for (size_t i = 0; i < 100; ++i) {
+        // Create the old SOA and remove it, and record it in the expected list
+        RRsetPtr rrset1(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        string soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+        rrset1->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->deleteRRset(*rrset1);
+        expected.push_back(rrset1);
+
+        // Create a new SOA, add it, and record it.
+        RRsetPtr rrset2(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i + 1) +
+            " 3600 1800 2419200 7200";
+        rrset2->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->addRRset(*rrset2);
+        expected.push_back(rrset2);
+    }
+    this->updater_->commit();
+
+    ZoneJournalReaderPtr jnl_reader(this->client_->getJournalReader(
+                                        this->zname_, 1234, 1334).second);
+    ConstRRsetPtr actual;
+    int i = 0;
+    while ((actual = jnl_reader->getNextDiff()) != NULL) {
+        isc::testutils::rrsetCheck(expected.at(i++), actual);
+    }
+    EXPECT_EQ(expected.size(), i); // we should have eaten all expected data
+}
+
+TYPED_TEST(DatabaseClientTest, readJournalForNoRange) {
+    makeSimpleDiff(*this->client_, this->zname_, this->qclass_, this->soa_);
+
+    // The specified range does not exist in the diff storage.  The factory
+    // method should result in NO_SUCH_VERSION
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1200, 1235);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_VERSION, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReaderForNXZone) {
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(Name("nosuchzone"), 0, 1);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_ZONE, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+// A helper function for journalWithBadData.  It installs a simple diff
+// from one serial (of 'begin') to another ('begin' + 1), tweaking a specified
+// field of data with some invalid value.
+void
+installBadDiff(MockAccessor& accessor, uint32_t begin,
+               DatabaseAccessor::DiffRecordParams modify_param,
+               const char* const data)
+{
+    string data1[] = {"example.org.", "SOA", "3600", "ns. root. 1 1 1 1 1"};
+    string data2[] = {"example.org.", "SOA", "3600", "ns. root. 2 1 1 1 1"};
+    data1[modify_param] = data;
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin,
+                           DatabaseAccessor::DIFF_DELETE, data1);
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin + 1,
+                           DatabaseAccessor::DIFF_ADD, data2);
+}
+
+TEST_F(MockDatabaseClientTest, journalWithBadData) {
+    MockAccessor& mock_accessor =
+        dynamic_cast<MockAccessor&>(*current_accessor_);
+
+    // One of the fields from the data source is broken as an RR parameter.
+    // The journal reader should still be constructed, but getNextDiff()
+    // should result in exception.
+    installBadDiff(mock_accessor, 1, DatabaseAccessor::DIFF_NAME,
+                   "example..org");
+    installBadDiff(mock_accessor, 3, DatabaseAccessor::DIFF_TYPE,
+                   "bad-rrtype");
+    installBadDiff(mock_accessor, 5, DatabaseAccessor::DIFF_TTL,
+                   "bad-ttl");
+    installBadDiff(mock_accessor, 7, DatabaseAccessor::DIFF_RDATA,
+                   "bad rdata");
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 1, 2).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 3, 4).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 5, 6).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 7, 8).
+                 second->getNextDiff(), DataSourceError);
+}
+
+}
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
new file mode 100644
index 0000000..e98f9bc
--- /dev/null
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -0,0 +1,240 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/scoped_ptr.hpp>
+
+#include <datasrc/datasrc_config.h>
+#include <datasrc/factory.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include <dns/rrclass.h>
+#include <cc/data.h>
+
+#include <gtest/gtest.h>
+
+using namespace isc::datasrc;
+using namespace isc::data;
+
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+
+namespace {
+
+// note this helper only checks the error that is received up to the length
+// of the expected string. It will always pass if you give it an empty
+// expected_error
+void
+pathtestHelper(const std::string& file, const std::string& expected_error) {
+    std::string error;
+    try {
+        DataSourceClientContainer(file, ElementPtr());
+    } catch (const DataSourceLibraryError& dsle) {
+        error = dsle.what();
+    }
+    ASSERT_LT(expected_error.size(), error.size());
+    EXPECT_EQ(expected_error, error.substr(0, expected_error.size()));
+}
+
+TEST(FactoryTest, paths) {
+    // Test whether the paths are made absolute if they are not,
+    // by inspecting the error that is raised when they are wrong
+    const std::string error("dlopen failed for ");
+    // With the current implementation, we can safely assume this has
+    // been set for this test (as the loader would otherwise also fail
+    // unless the loadable backend library happens to be installed)
+    const std::string builddir(getenv("B10_FROM_BUILD"));
+
+    // Absolute and ending with .so should have no change
+    pathtestHelper("/no_such_file.so", error + "/no_such_file.so");
+
+    // If no ending in .so, it should get _ds.so
+    pathtestHelper("/no_such_file", error + "/no_such_file_ds.so");
+
+    // If not starting with /, path should be added. For this test that
+    // means the build directory as set in B10_FROM_BUILD
+    pathtestHelper("no_such_file.so", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so");
+    pathtestHelper("no_such_file", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file_ds.so");
+
+    // Some tests with '.so' in the name itself
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something", error +
+                   "/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something.so", error +
+                   "/no_such_file.so.something.so");
+    pathtestHelper("/no_such_file.so.so", error +
+                   "/no_such_file.so.so");
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+
+    // Temporarily unset B10_FROM_BUILD to see that BACKEND_LIBRARY_PATH
+    // is used
+    unsetenv("B10_FROM_BUILD");
+    pathtestHelper("no_such_file.so", error + BACKEND_LIBRARY_PATH +
+                   "no_such_file.so");
+    // Put it back just in case
+    setenv("B10_FROM_BUILD", builddir.c_str(), 1);
+
+    // Test some bad input values
+    ASSERT_THROW(DataSourceClientContainer("", ElementPtr()),
+                 DataSourceLibraryError);
+    ASSERT_THROW(DataSourceClientContainer(".so", ElementPtr()),
+                 DataSourceLibraryError);
+}
+
+TEST(FactoryTest, sqlite3ClientBadConfig) {
+    // We start out by building the configuration data bit by bit,
+    // testing each form of 'bad config', until we have a good one.
+    // Then we do some very basic operation on the client (detailed
+    // tests are left to the implementation-specific backends)
+    ElementPtr config;
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config = Element::create("asdf");
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config = Element::createMap();
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("class", ElementPtr());
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("class", Element::create(1));
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("class", Element::create("FOO"));
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("class", Element::create("IN"));
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("database_file", ElementPtr());
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("database_file", Element::create(1));
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("database_file", Element::create("/foo/bar/doesnotexist"));
+    ASSERT_THROW(DataSourceClientContainer("sqlite3", config),
+                 DataSourceError);
+
+    config->set("database_file", Element::create(SQLITE_DBFILE_EXAMPLE_ORG));
+    DataSourceClientContainer dsc("sqlite3", config);
+
+    DataSourceClient::FindResult result1(
+        dsc.getInstance().findZone(isc::dns::Name("example.org.")));
+    ASSERT_EQ(result::SUCCESS, result1.code);
+
+    DataSourceClient::FindResult result2(
+        dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+    ASSERT_EQ(result::NOTFOUND, result2.code);
+
+    ZoneIteratorPtr iterator(dsc.getInstance().getIterator(
+        isc::dns::Name("example.org.")));
+
+    ZoneUpdaterPtr updater(dsc.getInstance().getUpdater(
+        isc::dns::Name("example.org."), false));
+}
+
+TEST(FactoryTest, memoryClient) {
+    // We start out by building the configuration data bit by bit,
+    // testing each form of 'bad config', until we have a good one.
+    // Then we do some very basic operation on the client (detailed
+    // tests are left to the implementation-specific backends)
+    ElementPtr config;
+    ASSERT_THROW(DataSourceClientContainer client("memory", config),
+                 DataSourceError);
+
+    config = Element::create("asdf");
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config = Element::createMap();
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("type", ElementPtr());
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("type", Element::create(1));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("type", Element::create("FOO"));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("type", Element::create("memory"));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("class", ElementPtr());
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("class", Element::create(1));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("class", Element::create("FOO"));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("class", Element::create("IN"));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("zones", ElementPtr());
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("zones", Element::create(1));
+    ASSERT_THROW(DataSourceClientContainer("memory", config),
+                 DataSourceError);
+
+    config->set("zones", Element::createList());
+    DataSourceClientContainer dsc("memory", config);
+
+    // Once it is able to load some zones, we should add a few tests
+    // here to see that it does.
+    DataSourceClient::FindResult result(
+        dsc.getInstance().findZone(isc::dns::Name("no.such.zone.")));
+    ASSERT_EQ(result::NOTFOUND, result.code);
+
+    ASSERT_THROW(dsc.getInstance().getIterator(isc::dns::Name("example.org.")),
+                 DataSourceError);
+
+    ASSERT_THROW(dsc.getInstance().getUpdater(isc::dns::Name("no.such.zone."),
+                                              false), isc::NotImplemented);
+}
+
+TEST(FactoryTest, badType) {
+    ASSERT_THROW(DataSourceClientContainer("foo", ElementPtr()),
+                                           DataSourceError);
+}
+
+} // end anonymous namespace
+
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 83fbb58..a1bd94e 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -29,6 +29,8 @@
 #include <dns/masterload.h>
 
 #include <datasrc/memory_datasrc.h>
+#include <datasrc/data_source.h>
+#include <datasrc/iterator.h>
 
 #include <gtest/gtest.h>
 
@@ -42,119 +44,221 @@ namespace {
 using result::SUCCESS;
 using result::EXIST;
 
-class MemoryDataSrcTest : public ::testing::Test {
+class InMemoryClientTest : public ::testing::Test {
 protected:
-    MemoryDataSrcTest() : rrclass(RRClass::IN())
+    InMemoryClientTest() : rrclass(RRClass::IN())
     {}
     RRClass rrclass;
-    MemoryDataSrc memory_datasrc;
+    InMemoryClient memory_client;
 };
 
-TEST_F(MemoryDataSrcTest, add_find_Zone) {
+TEST_F(InMemoryClientTest, add_find_Zone) {
     // test add zone
     // Bogus zone (NULL)
-    EXPECT_THROW(memory_datasrc.addZone(ZonePtr()), isc::InvalidParameter);
+    EXPECT_THROW(memory_client.addZone(ZoneFinderPtr()),
+                 isc::InvalidParameter);
 
     // add zones with different names one by one
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(), Name("a")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(), Name("b")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(), Name("c")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("a")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("b")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("c")))));
     // add zones with the same name suffix
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(),
-                                         Name("x.d.e.f")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(),
-                                         Name("o.w.y.d.e.f")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(),
-                                         Name("p.w.y.d.e.f")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(),
-                                         Name("q.w.y.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("x.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("o.w.y.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("p.w.y.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("q.w.y.d.e.f")))));
     // add super zone and its subzone
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(), Name("g.h")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(), Name("i.g.h")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(),
-                                         Name("z.d.e.f")))));
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(),
-                                         Name("j.z.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("g.h")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                               Name("i.g.h")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("z.d.e.f")))));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("j.z.d.e.f")))));
 
     // different zone class isn't allowed.
-    EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(),
-                                         Name("q.w.y.d.e.f")))));
+    EXPECT_EQ(result::EXIST, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("q.w.y.d.e.f")))));
 
     // names are compared in a case insensitive manner.
-    EXPECT_EQ(result::EXIST, memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(),
-                                         Name("Q.W.Y.d.E.f")))));
+    EXPECT_EQ(result::EXIST, memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("Q.W.Y.d.E.f")))));
 
     // test find zone
-    EXPECT_EQ(result::SUCCESS, memory_datasrc.findZone(Name("a")).code);
+    EXPECT_EQ(result::SUCCESS, memory_client.findZone(Name("a")).code);
     EXPECT_EQ(Name("a"),
-              memory_datasrc.findZone(Name("a")).zone->getOrigin());
+              memory_client.findZone(Name("a")).zone_finder->getOrigin());
 
     EXPECT_EQ(result::SUCCESS,
-              memory_datasrc.findZone(Name("j.z.d.e.f")).code);
+              memory_client.findZone(Name("j.z.d.e.f")).code);
     EXPECT_EQ(Name("j.z.d.e.f"),
-              memory_datasrc.findZone(Name("j.z.d.e.f")).zone->getOrigin());
+              memory_client.findZone(Name("j.z.d.e.f")).zone_finder->
+                  getOrigin());
 
     // NOTFOUND
-    EXPECT_EQ(result::NOTFOUND, memory_datasrc.findZone(Name("d.e.f")).code);
-    EXPECT_EQ(ConstZonePtr(), memory_datasrc.findZone(Name("d.e.f")).zone);
+    EXPECT_EQ(result::NOTFOUND, memory_client.findZone(Name("d.e.f")).code);
+    EXPECT_EQ(ConstZoneFinderPtr(),
+              memory_client.findZone(Name("d.e.f")).zone_finder);
 
     EXPECT_EQ(result::NOTFOUND,
-              memory_datasrc.findZone(Name("w.y.d.e.f")).code);
-    EXPECT_EQ(ConstZonePtr(),
-              memory_datasrc.findZone(Name("w.y.d.e.f")).zone);
+              memory_client.findZone(Name("w.y.d.e.f")).code);
+    EXPECT_EQ(ConstZoneFinderPtr(),
+              memory_client.findZone(Name("w.y.d.e.f")).zone_finder);
 
     // there's no exact match.  the result should be the longest match,
     // and the code should be PARTIALMATCH.
     EXPECT_EQ(result::PARTIALMATCH,
-              memory_datasrc.findZone(Name("j.g.h")).code);
+              memory_client.findZone(Name("j.g.h")).code);
     EXPECT_EQ(Name("g.h"),
-              memory_datasrc.findZone(Name("g.h")).zone->getOrigin());
+              memory_client.findZone(Name("g.h")).zone_finder->getOrigin());
 
     EXPECT_EQ(result::PARTIALMATCH,
-              memory_datasrc.findZone(Name("z.i.g.h")).code);
+              memory_client.findZone(Name("z.i.g.h")).code);
     EXPECT_EQ(Name("i.g.h"),
-              memory_datasrc.findZone(Name("z.i.g.h")).zone->getOrigin());
+              memory_client.findZone(Name("z.i.g.h")).zone_finder->
+                  getOrigin());
 }
 
-TEST_F(MemoryDataSrcTest, getZoneCount) {
-    EXPECT_EQ(0, memory_datasrc.getZoneCount());
-    memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
-    EXPECT_EQ(1, memory_datasrc.getZoneCount());
+TEST_F(InMemoryClientTest, iterator) {
+    // Just some preparations of data
+    boost::shared_ptr<InMemoryZoneFinder>
+        zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+    RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+    RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+    RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+    // First, the zone is not there, so it should throw
+    EXPECT_THROW(memory_client.getIterator(Name("b")), DataSourceError);
+    // This zone is not there either, even when there's a zone containing this
+    EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+    // Now, an empty zone
+    ZoneIteratorPtr iterator(memory_client.getIterator(Name("a")));
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+    // It throws Unexpected when we are past the end
+    EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+    EXPECT_EQ(result::SUCCESS, zone->add(aRRsetA));
+    EXPECT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+    EXPECT_EQ(result::SUCCESS, zone->add(subRRsetA));
+    // Check it with full zone, one by one.
+    // It should be in ascending order in case of InMemory data source
+    // (isn't guaranteed in general)
+    iterator = memory_client.getIterator(Name("a"));
+    EXPECT_EQ(aRRsetA, iterator->getNextRRset());
+    EXPECT_EQ(aRRsetAAAA, iterator->getNextRRset());
+    EXPECT_EQ(subRRsetA, iterator->getNextRRset());
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
+TEST_F(InMemoryClientTest, iterator_separate_rrs) {
+    // Exactly the same tests as for iterator, but now with separate_rrs = true
+    // For the one that returns actual data, the AAAA should now be split up
+    boost::shared_ptr<InMemoryZoneFinder>
+        zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+    RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+    RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+    RRsetPtr aRRsetAAAA_r1(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA_r1->addRdata(rdata::in::AAAA("2001:db8::1"));
+    RRsetPtr aRRsetAAAA_r2(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA_r2->addRdata(rdata::in::AAAA("2001:db8::2"));
+
+    RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+
+    // First, the zone is not there, so it should throw
+    EXPECT_THROW(memory_client.getIterator(Name("b"), true), DataSourceError);
+    // This zone is not there either, even when there's a zone containing this
+    EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+    // Now, an empty zone
+    ZoneIteratorPtr iterator(memory_client.getIterator(Name("a"), true));
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+    // It throws Unexpected when we are past the end
+    EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+
+    ASSERT_EQ(result::SUCCESS, zone->add(aRRsetA));
+    ASSERT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+    ASSERT_EQ(result::SUCCESS, zone->add(subRRsetA));
+    // Check it with full zone, one by one.
+    // It should be in ascending order in case of InMemory data source
+    // (isn't guaranteed in general)
+    iterator = memory_client.getIterator(Name("a"), true);
+    EXPECT_EQ(aRRsetA->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(aRRsetAAAA_r1->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(aRRsetAAAA_r2->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(subRRsetA->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
+TEST_F(InMemoryClientTest, getZoneCount) {
+    EXPECT_EQ(0, memory_client.getZoneCount());
+    memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+                                                       Name("example.com"))));
+    EXPECT_EQ(1, memory_client.getZoneCount());
 
     // duplicate add.  counter shouldn't change
-    memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(rrclass, Name("example.com"))));
-    EXPECT_EQ(1, memory_datasrc.getZoneCount());
+    memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+                                                       Name("example.com"))));
+    EXPECT_EQ(1, memory_client.getZoneCount());
 
     // add one more
-    memory_datasrc.addZone(
-                  ZonePtr(new MemoryZone(rrclass, Name("example.org"))));
-    EXPECT_EQ(2, memory_datasrc.getZoneCount());
+    memory_client.addZone(
+                  ZoneFinderPtr(new InMemoryZoneFinder(rrclass,
+                                                       Name("example.org"))));
+    EXPECT_EQ(2, memory_client.getZoneCount());
+}
+
+TEST_F(InMemoryClientTest, startUpdateZone) {
+    EXPECT_THROW(memory_client.getUpdater(Name("example.org"), false),
+                 isc::NotImplemented);
 }
 
-// A helper callback of masterLoad() used in MemoryZoneTest.
+// A helper callback of masterLoad() used in InMemoryZoneFinderTest.
 void
 setRRset(RRsetPtr rrset, vector<RRsetPtr*>::iterator& it) {
     *(*it) = rrset;
     ++it;
 }
 
-/// \brief Test fixture for the MemoryZone class
-class MemoryZoneTest : public ::testing::Test {
+/// \brief Test fixture for the InMemoryZoneFinder class
+class InMemoryZoneFinderTest : public ::testing::Test {
     // A straightforward pair of textual RR(set) and a RRsetPtr variable
     // to store the RRset.  Used to build test data below.
     struct RRsetData {
@@ -162,10 +266,10 @@ class MemoryZoneTest : public ::testing::Test {
         RRsetPtr* rrset;
     };
 public:
-    MemoryZoneTest() :
+    InMemoryZoneFinderTest() :
         class_(RRClass::IN()),
         origin_("example.org"),
-        zone_(class_, origin_)
+        zone_finder_(class_, origin_)
     {
         // Build test RRsets.  Below, we construct an RRset for
         // each textual RR(s) of zone_data, and assign it to the corresponding
@@ -224,8 +328,8 @@ public:
     // Some data to test with
     const RRClass class_;
     const Name origin_;
-    // The zone to torture by tests
-    MemoryZone zone_;
+    // The zone finder to torture by tests
+    InMemoryZoneFinder zone_finder_;
 
     /*
      * Some RRsets to put inside the zone.
@@ -262,9 +366,9 @@ public:
     RRsetPtr rr_not_wild_another_;
 
     /**
-     * \brief Test one find query to the zone.
+     * \brief Test one find query to the zone finder.
      *
-     * Asks a query to the zone and checks it does not throw and returns
+     * Asks a query to the zone finder and checks it does not throw and returns
      * expected results. It returns nothing, it just signals failures
      * to GTEST.
      *
@@ -274,29 +378,31 @@ public:
      * \param check_answer Should a check against equality of the answer be
      *     done?
      * \param answer The expected rrset, if any should be returned.
-     * \param zone Check different MemoryZone object than zone_ (if NULL,
-     *     uses zone_)
+     * \param zone_finder Check different InMemoryZoneFinder object than
+     *     zone_finder_ (if NULL, uses zone_finder_)
      * \param check_wild_answer Checks that the answer has the same RRs, type
      *     class and TTL as the eqxpected answer and that the name corresponds
      *     to the one searched. It is meant for checking answers for wildcard
      *     queries.
      */
-    void findTest(const Name& name, const RRType& rrtype, Zone::Result result,
+    void findTest(const Name& name, const RRType& rrtype,
+                  ZoneFinder::Result result,
                   bool check_answer = true,
                   const ConstRRsetPtr& answer = ConstRRsetPtr(),
                   RRsetList* target = NULL,
-                  MemoryZone* zone = NULL,
-                  Zone::FindOptions options = Zone::FIND_DEFAULT,
+                  InMemoryZoneFinder* zone_finder = NULL,
+                  ZoneFinder::FindOptions options = ZoneFinder::FIND_DEFAULT,
                   bool check_wild_answer = false)
     {
-        if (!zone) {
-            zone = &zone_;
+        if (zone_finder == NULL) {
+            zone_finder = &zone_finder_;
         }
         // The whole block is inside, because we need to check the result and
         // we can't assign to FindResult
         EXPECT_NO_THROW({
-                Zone::FindResult find_result(zone->find(name, rrtype, target,
-                                                        options));
+                ZoneFinder::FindResult find_result(zone_finder->find(
+                                                       name, rrtype,
+                                                       target, options));
                 // Check it returns correct answers
                 EXPECT_EQ(result, find_result.code);
                 if (check_answer) {
@@ -337,14 +443,22 @@ public:
 };
 
 /**
- * \brief Test MemoryZone::MemoryZone constructor.
+ * \brief Check that findPreviousName throws as it should now.
+ */
+TEST_F(InMemoryZoneFinderTest, findPreviousName) {
+    EXPECT_THROW(zone_finder_.findPreviousName(Name("www.example.org")),
+                 isc::NotImplemented);
+}
+
+/**
+ * \brief Test InMemoryZoneFinder::InMemoryZoneFinder constructor.
  *
- * Takes the created zone and checks its properties they are the same
+ * Takes the created zone finder and checks its properties they are the same
  * as passed parameters.
  */
-TEST_F(MemoryZoneTest, constructor) {
-    ASSERT_EQ(class_, zone_.getClass());
-    ASSERT_EQ(origin_, zone_.getOrigin());
+TEST_F(InMemoryZoneFinderTest, constructor) {
+    ASSERT_EQ(class_, zone_finder_.getClass());
+    ASSERT_EQ(origin_, zone_finder_.getOrigin());
 }
 /**
  * \brief Test adding.
@@ -352,174 +466,178 @@ TEST_F(MemoryZoneTest, constructor) {
  * We test that it throws at the correct moments and the correct exceptions.
  * And we test the return value.
  */
-TEST_F(MemoryZoneTest, add) {
+TEST_F(InMemoryZoneFinderTest, add) {
     // This one does not belong to this zone
-    EXPECT_THROW(zone_.add(rr_out_), MemoryZone::OutOfZone);
+    EXPECT_THROW(zone_finder_.add(rr_out_), InMemoryZoneFinder::OutOfZone);
     // Test null pointer
-    EXPECT_THROW(zone_.add(ConstRRsetPtr()), MemoryZone::NullRRset);
+    EXPECT_THROW(zone_finder_.add(ConstRRsetPtr()),
+                 InMemoryZoneFinder::NullRRset);
 
     // Now put all the data we have there. It should throw nothing
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
 
     // Try putting there something twice, it should be rejected
-    EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_)));
-    EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_.add(rr_ns_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(EXIST, zone_finder_.add(rr_ns_a_)));
 }
 
-TEST_F(MemoryZoneTest, addMultipleCNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleCNAMEs) {
     rr_cname_->addRdata(generic::CNAME("canonical2.example.org."));
-    EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+    EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
 }
 
-TEST_F(MemoryZoneTest, addCNAMEThenOther) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
-    EXPECT_THROW(zone_.add(rr_cname_a_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addCNAMEThenOther) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
+    EXPECT_THROW(zone_finder_.add(rr_cname_a_), InMemoryZoneFinder::AddError);
 }
 
-TEST_F(MemoryZoneTest, addOtherThenCNAME) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_cname_a_));
-    EXPECT_THROW(zone_.add(rr_cname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addOtherThenCNAME) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_a_));
+    EXPECT_THROW(zone_finder_.add(rr_cname_), InMemoryZoneFinder::AddError);
 }
 
-TEST_F(MemoryZoneTest, findCNAME) {
+TEST_F(InMemoryZoneFinderTest, findCNAME) {
     // install CNAME RR
-    EXPECT_EQ(SUCCESS, zone_.add(rr_cname_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_));
 
     // Find A RR of the same.  Should match the CNAME
-    findTest(rr_cname_->getName(), RRType::NS(), Zone::CNAME, true, rr_cname_);
+    findTest(rr_cname_->getName(), RRType::NS(), ZoneFinder::CNAME, true,
+             rr_cname_);
 
     // Find the CNAME itself.  Should result in normal SUCCESS
-    findTest(rr_cname_->getName(), RRType::CNAME(), Zone::SUCCESS, true,
+    findTest(rr_cname_->getName(), RRType::CNAME(), ZoneFinder::SUCCESS, true,
              rr_cname_);
 }
 
-TEST_F(MemoryZoneTest, findCNAMEUnderZoneCut) {
+TEST_F(InMemoryZoneFinderTest, findCNAMEUnderZoneCut) {
     // There's nothing special when we find a CNAME under a zone cut
     // (with FIND_GLUE_OK).  The behavior is different from BIND 9,
     // so we test this case explicitly.
-    EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
     RRsetPtr rr_cname_under_cut_(new RRset(Name("cname.child.example.org"),
                                            class_, RRType::CNAME(),
                                            RRTTL(300)));
-    EXPECT_EQ(SUCCESS, zone_.add(rr_cname_under_cut_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_cname_under_cut_));
     findTest(Name("cname.child.example.org"), RRType::AAAA(),
-             Zone::CNAME, true, rr_cname_under_cut_, NULL, NULL,
-             Zone::FIND_GLUE_OK);
+             ZoneFinder::CNAME, true, rr_cname_under_cut_, NULL, NULL,
+             ZoneFinder::FIND_GLUE_OK);
 }
 
 // Two DNAMEs at single domain are disallowed by RFC 2672, section 3)
 // Having a CNAME there is disallowed too, but it is tested by
 // addOtherThenCNAME and addCNAMEThenOther.
-TEST_F(MemoryZoneTest, addMultipleDNAMEs) {
+TEST_F(InMemoryZoneFinderTest, addMultipleDNAMEs) {
     rr_dname_->addRdata(generic::DNAME("target2.example.org."));
-    EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+    EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
 }
 
 /*
  * These two tests ensure that we can't have DNAME and NS at the same
  * node with the exception of the apex of zone (forbidden by RFC 2672)
  */
-TEST_F(MemoryZoneTest, addDNAMEThenNS) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
-    EXPECT_THROW(zone_.add(rr_dname_ns_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addDNAMEThenNS) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+    EXPECT_THROW(zone_finder_.add(rr_dname_ns_), InMemoryZoneFinder::AddError);
 }
 
-TEST_F(MemoryZoneTest, addNSThenDNAME) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_ns_)));
-    EXPECT_THROW(zone_.add(rr_dname_), MemoryZone::AddError);
+TEST_F(InMemoryZoneFinderTest, addNSThenDNAME) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_ns_)));
+    EXPECT_THROW(zone_finder_.add(rr_dname_), InMemoryZoneFinder::AddError);
 }
 
 // It is allowed to have NS and DNAME at apex
-TEST_F(MemoryZoneTest, DNAMEAndNSAtApex) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+TEST_F(InMemoryZoneFinderTest, DNAMEAndNSAtApex) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
 
     // The NS should be possible to be found, below should be DNAME, not
     // delegation
-    findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
-    findTest(rr_child_ns_->getName(), RRType::A(), Zone::DNAME, true,
+    findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+    findTest(rr_child_ns_->getName(), RRType::A(), ZoneFinder::DNAME, true,
              rr_dname_apex_);
 }
 
-TEST_F(MemoryZoneTest, NSAndDNAMEAtApex) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_apex_)));
+TEST_F(InMemoryZoneFinderTest, NSAndDNAMEAtApex) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_apex_)));
 }
 
 // TODO: Test (and implement) adding data under DNAME. That is forbidden by
 // 2672 as well.
 
 // Search under a DNAME record. It should return the DNAME
-TEST_F(MemoryZoneTest, findBelowDNAME) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
-    findTest(Name("below.dname.example.org"), RRType::A(), Zone::DNAME, true,
-        rr_dname_);
+TEST_F(InMemoryZoneFinderTest, findBelowDNAME) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+    findTest(Name("below.dname.example.org"), RRType::A(), ZoneFinder::DNAME,
+             true, rr_dname_);
 }
 
 // Search at the domain with DNAME. It should act as DNAME isn't there, DNAME
 // influences only the data below (see RFC 2672, section 3)
-TEST_F(MemoryZoneTest, findAtDNAME) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_dname_a_)));
+TEST_F(InMemoryZoneFinderTest, findAtDNAME) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_dname_a_)));
 
     const Name dname_name(rr_dname_->getName());
-    findTest(dname_name, RRType::A(), Zone::SUCCESS, true, rr_dname_a_);
-    findTest(dname_name, RRType::DNAME(), Zone::SUCCESS, true, rr_dname_);
-    findTest(dname_name, RRType::TXT(), Zone::NXRRSET, true);
+    findTest(dname_name, RRType::A(), ZoneFinder::SUCCESS, true, rr_dname_a_);
+    findTest(dname_name, RRType::DNAME(), ZoneFinder::SUCCESS, true,
+             rr_dname_);
+    findTest(dname_name, RRType::TXT(), ZoneFinder::NXRRSET, true);
 }
 
 // Try searching something that is both under NS and DNAME, without and with
 // GLUE_OK mode (it should stop at the NS and DNAME respectively).
-TEST_F(MemoryZoneTest, DNAMEUnderNS) {
-    zone_.add(rr_child_ns_);
-    zone_.add(rr_child_dname_);
+TEST_F(InMemoryZoneFinderTest, DNAMEUnderNS) {
+    zone_finder_.add(rr_child_ns_);
+    zone_finder_.add(rr_child_dname_);
 
     Name lowName("below.dname.child.example.org.");
 
-    findTest(lowName, RRType::A(), Zone::DELEGATION, true, rr_child_ns_);
-    findTest(lowName, RRType::A(), Zone::DNAME, true, rr_child_dname_, NULL,
-        NULL, Zone::FIND_GLUE_OK);
+    findTest(lowName, RRType::A(), ZoneFinder::DELEGATION, true, rr_child_ns_);
+    findTest(lowName, RRType::A(), ZoneFinder::DNAME, true, rr_child_dname_,
+             NULL, NULL, ZoneFinder::FIND_GLUE_OK);
 }
 
 // Test adding child zones and zone cut handling
-TEST_F(MemoryZoneTest, delegationNS) {
+TEST_F(InMemoryZoneFinderTest, delegationNS) {
     // add in-zone data
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
 
     // install a zone cut
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
 
     // below the zone cut
-    findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
-             true, rr_child_ns_);
+    findTest(Name("www.child.example.org"), RRType::A(),
+             ZoneFinder::DELEGATION, true, rr_child_ns_);
 
     // at the zone cut
-    findTest(Name("child.example.org"), RRType::A(), Zone::DELEGATION,
+    findTest(Name("child.example.org"), RRType::A(), ZoneFinder::DELEGATION,
              true, rr_child_ns_);
-    findTest(Name("child.example.org"), RRType::NS(), Zone::DELEGATION,
+    findTest(Name("child.example.org"), RRType::NS(), ZoneFinder::DELEGATION,
              true, rr_child_ns_);
 
     // finding NS for the apex (origin) node.  This must not be confused
     // with delegation due to the existence of an NS RR.
-    findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+    findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
 
     // unusual case of "nested delegation": the highest cut should be used.
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
     findTest(Name("www.grand.child.example.org"), RRType::A(),
-             Zone::DELEGATION, true, rr_child_ns_); // note: !rr_grandchild_ns_
+             // note: !rr_grandchild_ns_
+             ZoneFinder::DELEGATION, true, rr_child_ns_);
 }
 
-TEST_F(MemoryZoneTest, findAny) {
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+TEST_F(InMemoryZoneFinderTest, findAny) {
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
 
     // origin
     RRsetList origin_rrsets;
-    findTest(origin_, RRType::ANY(), Zone::SUCCESS, true,
+    findTest(origin_, RRType::ANY(), ZoneFinder::SUCCESS, true,
              ConstRRsetPtr(), &origin_rrsets);
     EXPECT_EQ(2, origin_rrsets.size());
     EXPECT_EQ(rr_a_, origin_rrsets.findRRset(RRType::A(), RRClass::IN()));
@@ -527,13 +645,13 @@ TEST_F(MemoryZoneTest, findAny) {
 
     // out zone name
     RRsetList out_rrsets;
-    findTest(Name("example.com"), RRType::ANY(), Zone::NXDOMAIN, true,
+    findTest(Name("example.com"), RRType::ANY(), ZoneFinder::NXDOMAIN, true,
              ConstRRsetPtr(), &out_rrsets);
     EXPECT_EQ(0, out_rrsets.size());
 
     RRsetList glue_child_rrsets;
-    findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::SUCCESS, true,
-                ConstRRsetPtr(), &glue_child_rrsets);
+    findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::SUCCESS,
+             true, ConstRRsetPtr(), &glue_child_rrsets);
     EXPECT_EQ(rr_child_glue_, glue_child_rrsets.findRRset(RRType::A(),
                                                      RRClass::IN()));
     EXPECT_EQ(1, glue_child_rrsets.size());
@@ -542,59 +660,60 @@ TEST_F(MemoryZoneTest, findAny) {
     // been implemented
 
     // add zone cut
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
 
     // zone cut
     RRsetList child_rrsets;
-    findTest(rr_child_ns_->getName(), RRType::ANY(), Zone::DELEGATION, true,
-             rr_child_ns_, &child_rrsets);
+    findTest(rr_child_ns_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+             true, rr_child_ns_, &child_rrsets);
     EXPECT_EQ(0, child_rrsets.size());
 
     // glue for this zone cut
     RRsetList new_glue_child_rrsets;
-    findTest(rr_child_glue_->getName(), RRType::ANY(), Zone::DELEGATION, true,
-                rr_child_ns_, &new_glue_child_rrsets);
+    findTest(rr_child_glue_->getName(), RRType::ANY(), ZoneFinder::DELEGATION,
+             true, rr_child_ns_, &new_glue_child_rrsets);
     EXPECT_EQ(0, new_glue_child_rrsets.size());
 }
 
-TEST_F(MemoryZoneTest, glue) {
+TEST_F(InMemoryZoneFinderTest, glue) {
     // install zone data:
     // a zone cut
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_)));
     // glue for this cut
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_child_glue_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_glue_)));
     // a nested zone cut (unusual)
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_ns_)));
     // glue under the deeper zone cut
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_grandchild_glue_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_grandchild_glue_)));
 
     // by default glue is hidden due to the zone cut
-    findTest(rr_child_glue_->getName(), RRType::A(), Zone::DELEGATION, true,
-             rr_child_ns_);
+    findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::DELEGATION,
+             true, rr_child_ns_);
 
 
     // If we do it in the "glue OK" mode, we should find the exact match.
-    findTest(rr_child_glue_->getName(), RRType::A(), Zone::SUCCESS, true,
-             rr_child_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+    findTest(rr_child_glue_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+             rr_child_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
 
     // glue OK + NXRRSET case
-    findTest(rr_child_glue_->getName(), RRType::AAAA(), Zone::NXRRSET, true,
-             ConstRRsetPtr(), NULL, NULL, Zone::FIND_GLUE_OK);
+    findTest(rr_child_glue_->getName(), RRType::AAAA(), ZoneFinder::NXRRSET,
+             true, ConstRRsetPtr(), NULL, NULL, ZoneFinder::FIND_GLUE_OK);
 
     // glue OK + NXDOMAIN case
-    findTest(Name("www.child.example.org"), RRType::A(), Zone::DELEGATION,
-             true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+    findTest(Name("www.child.example.org"), RRType::A(),
+             ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+             ZoneFinder::FIND_GLUE_OK);
 
     // nested cut case.  The glue should be found.
     findTest(rr_grandchild_glue_->getName(), RRType::AAAA(),
-             Zone::SUCCESS,
-             true, rr_grandchild_glue_, NULL, NULL, Zone::FIND_GLUE_OK);
+             ZoneFinder::SUCCESS,
+             true, rr_grandchild_glue_, NULL, NULL, ZoneFinder::FIND_GLUE_OK);
 
     // A non-existent name in nested cut.  This should result in delegation
     // at the highest zone cut.
     findTest(Name("www.grand.child.example.org"), RRType::TXT(),
-             Zone::DELEGATION, true, rr_child_ns_, NULL, NULL,
-             Zone::FIND_GLUE_OK);
+             ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+             ZoneFinder::FIND_GLUE_OK);
 }
 
 /**
@@ -604,28 +723,29 @@ TEST_F(MemoryZoneTest, glue) {
  * \todo This doesn't do any kind of CNAME and so on. If it isn't
  *     directly there, it just tells it doesn't exist.
  */
-TEST_F(MemoryZoneTest, find) {
+TEST_F(InMemoryZoneFinderTest, find) {
     // Fill some data inside
     // Now put all the data we have there. It should throw nothing
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_a_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_ns_aaaa_)));
-    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_.add(rr_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_a_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_ns_aaaa_)));
+    EXPECT_NO_THROW(EXPECT_EQ(SUCCESS, zone_finder_.add(rr_a_)));
 
     // These two should be successful
-    findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
-    findTest(rr_ns_a_->getName(), RRType::A(), Zone::SUCCESS, true, rr_ns_a_);
+    findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
+    findTest(rr_ns_a_->getName(), RRType::A(), ZoneFinder::SUCCESS, true,
+             rr_ns_a_);
 
     // These domain exist but don't have the provided RRType
-    findTest(origin_, RRType::AAAA(), Zone::NXRRSET);
-    findTest(rr_ns_a_->getName(), RRType::NS(), Zone::NXRRSET);
+    findTest(origin_, RRType::AAAA(), ZoneFinder::NXRRSET);
+    findTest(rr_ns_a_->getName(), RRType::NS(), ZoneFinder::NXRRSET);
 
     // These domains don't exist (and one is out of the zone)
-    findTest(Name("nothere.example.org"), RRType::A(), Zone::NXDOMAIN);
-    findTest(Name("example.net"), RRType::A(), Zone::NXDOMAIN);
+    findTest(Name("nothere.example.org"), RRType::A(), ZoneFinder::NXDOMAIN);
+    findTest(Name("example.net"), RRType::A(), ZoneFinder::NXDOMAIN);
 }
 
-TEST_F(MemoryZoneTest, emptyNode) {
+TEST_F(InMemoryZoneFinderTest, emptyNode) {
     /*
      * The backend RBTree for this test should look like as follows:
      *          example.org
@@ -645,52 +765,53 @@ TEST_F(MemoryZoneTest, emptyNode) {
     for (int i = 0; names[i] != NULL; ++i) {
         ConstRRsetPtr rrset(new RRset(Name(names[i]), class_, RRType::A(),
                                       RRTTL(300)));
-        EXPECT_EQ(SUCCESS, zone_.add(rrset));
+        EXPECT_EQ(SUCCESS, zone_finder_.add(rrset));
     }
 
     // empty node matching, easy case: the node for 'baz' exists with
     // no data.
-    findTest(Name("baz.example.org"), RRType::A(), Zone::NXRRSET);
+    findTest(Name("baz.example.org"), RRType::A(), ZoneFinder::NXRRSET);
 
     // empty node matching, a trickier case: the node for 'foo' is part of
     // "x.foo", which should be considered an empty node.
-    findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+    findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
 
     // "org" is contained in "example.org", but it shouldn't be treated as
     // NXRRSET because it's out of zone.
     // Note: basically we don't expect such a query to be performed (the common
     // operation is to identify the best matching zone first then perform
     // search it), but we shouldn't be confused even in the unexpected case.
-    findTest(Name("org"), RRType::A(), Zone::NXDOMAIN);
+    findTest(Name("org"), RRType::A(), ZoneFinder::NXDOMAIN);
 }
 
-TEST_F(MemoryZoneTest, load) {
+TEST_F(InMemoryZoneFinderTest, load) {
     // Put some data inside the zone
-    EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_.add(rr_ns_)));
+    EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, zone_finder_.add(rr_ns_)));
     // Loading with different origin should fail
-    EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
+    EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+                 MasterLoadError);
     // See the original data is still there, survived the exception
-    findTest(origin_, RRType::NS(), Zone::SUCCESS, true, rr_ns_);
+    findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, true, rr_ns_);
     // Create correct zone
-    MemoryZone rootzone(class_, Name("."));
+    InMemoryZoneFinder rootzone(class_, Name("."));
     // Try putting something inside
     EXPECT_NO_THROW(EXPECT_EQ(result::SUCCESS, rootzone.add(rr_ns_aaaa_)));
     // Load the zone. It should overwrite/remove the above RRset
     EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
 
     // Now see there are some rrsets (we don't look inside, though)
-    findTest(Name("."), RRType::SOA(), Zone::SUCCESS, false, ConstRRsetPtr(),
-        NULL, &rootzone);
-    findTest(Name("."), RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
-        NULL, &rootzone);
-    findTest(Name("a.root-servers.net."), RRType::A(), Zone::SUCCESS, false,
-        ConstRRsetPtr(), NULL, &rootzone);
+    findTest(Name("."), RRType::SOA(), ZoneFinder::SUCCESS, false,
+             ConstRRsetPtr(), NULL, &rootzone);
+    findTest(Name("."), RRType::NS(), ZoneFinder::SUCCESS, false,
+             ConstRRsetPtr(), NULL, &rootzone);
+    findTest(Name("a.root-servers.net."), RRType::A(), ZoneFinder::SUCCESS,
+             false, ConstRRsetPtr(), NULL, &rootzone);
     // But this should no longer be here
-    findTest(rr_ns_a_->getName(), RRType::AAAA(), Zone::NXDOMAIN, true,
+    findTest(rr_ns_a_->getName(), RRType::AAAA(), ZoneFinder::NXDOMAIN, true,
              ConstRRsetPtr(), NULL, &rootzone);
 
     // Try loading zone that is wrong in a different way
-    EXPECT_THROW(zone_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
+    EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/duplicate_rrset.zone"),
         MasterLoadError);
 }
 
@@ -698,7 +819,7 @@ TEST_F(MemoryZoneTest, load) {
  * Test that puts a (simple) wildcard into the zone and checks we can
  * correctly find the data.
  */
-TEST_F(MemoryZoneTest, wildcard) {
+TEST_F(InMemoryZoneFinderTest, wildcard) {
     /*
      *            example.org.
      *                 |
@@ -706,40 +827,41 @@ TEST_F(MemoryZoneTest, wildcard) {
      *                 |
      *                 *
      */
-    EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
 
     // Search at the parent. The parent will not have the A, but it will
     // be in the wildcard (so check the wildcard isn't matched at the parent)
     {
         SCOPED_TRACE("Search at parrent");
-        findTest(Name("wild.example.org"), RRType::A(), Zone::NXRRSET);
+        findTest(Name("wild.example.org"), RRType::A(), ZoneFinder::NXRRSET);
     }
 
     // Search the original name of wildcard
     {
         SCOPED_TRACE("Search directly at *");
-        findTest(Name("*.wild.example.org"), RRType::A(), Zone::SUCCESS, true,
-            rr_wild_);
+        findTest(Name("*.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+                 true, rr_wild_);
     }
     // Search "created" name.
     {
         SCOPED_TRACE("Search at created child");
-        findTest(Name("a.wild.example.org"), RRType::A(), Zone::SUCCESS, false,
-            rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+        findTest(Name("a.wild.example.org"), RRType::A(), ZoneFinder::SUCCESS,
+                 false, rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
     }
 
     // Search another created name, this time little bit lower
     {
         SCOPED_TRACE("Search at created grand-child");
-        findTest(Name("a.b.wild.example.org"), RRType::A(), Zone::SUCCESS,
-            false, rr_wild_, NULL, NULL, Zone::FIND_DEFAULT, true);
+        findTest(Name("a.b.wild.example.org"), RRType::A(),
+                 ZoneFinder::SUCCESS, false, rr_wild_, NULL, NULL,
+                 ZoneFinder::FIND_DEFAULT, true);
     }
 
-    EXPECT_EQ(SUCCESS, zone_.add(rr_under_wild_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_under_wild_));
     {
         SCOPED_TRACE("Search under non-wildcard");
         findTest(Name("bar.foo.wild.example.org"), RRType::A(),
-            Zone::NXDOMAIN);
+            ZoneFinder::NXDOMAIN);
     }
 }
 
@@ -750,33 +872,34 @@ TEST_F(MemoryZoneTest, wildcard) {
  *   - When the query is in another zone.  That is, delegation cancels
  *     the wildcard defaults."
  */
-TEST_F(MemoryZoneTest, delegatedWildcard) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_child_wild_));
-    EXPECT_EQ(SUCCESS, zone_.add(rr_child_ns_));
+TEST_F(InMemoryZoneFinderTest, delegatedWildcard) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_wild_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_child_ns_));
 
     {
         SCOPED_TRACE("Looking under delegation point");
-        findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
-            true, rr_child_ns_);
+        findTest(Name("a.child.example.org"), RRType::A(),
+                 ZoneFinder::DELEGATION, true, rr_child_ns_);
     }
 
     {
         SCOPED_TRACE("Looking under delegation point in GLUE_OK mode");
-        findTest(Name("a.child.example.org"), RRType::A(), Zone::DELEGATION,
-            true, rr_child_ns_, NULL, NULL, Zone::FIND_GLUE_OK);
+        findTest(Name("a.child.example.org"), RRType::A(),
+                 ZoneFinder::DELEGATION, true, rr_child_ns_, NULL, NULL,
+                 ZoneFinder::FIND_GLUE_OK);
     }
 }
 
 // Tests combination of wildcard and ANY.
-TEST_F(MemoryZoneTest, anyWildcard) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
+TEST_F(InMemoryZoneFinderTest, anyWildcard) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
 
     // First try directly the name (normal match)
     {
         SCOPED_TRACE("Asking direcly for *");
         RRsetList target;
-        findTest(Name("*.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
-            true, ConstRRsetPtr(), &target);
+        findTest(Name("*.wild.example.org"), RRType::ANY(),
+                 ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
         ASSERT_EQ(1, target.size());
         EXPECT_EQ(RRType::A(), (*target.begin())->getType());
         EXPECT_EQ(Name("*.wild.example.org"), (*target.begin())->getName());
@@ -786,8 +909,8 @@ TEST_F(MemoryZoneTest, anyWildcard) {
     {
         SCOPED_TRACE("Asking in the wild way");
         RRsetList target;
-        findTest(Name("a.wild.example.org"), RRType::ANY(), Zone::SUCCESS,
-            true, ConstRRsetPtr(), &target);
+        findTest(Name("a.wild.example.org"), RRType::ANY(),
+                 ZoneFinder::SUCCESS, true, ConstRRsetPtr(), &target);
         ASSERT_EQ(1, target.size());
         EXPECT_EQ(RRType::A(), (*target.begin())->getType());
         EXPECT_EQ(Name("a.wild.example.org"), (*target.begin())->getName());
@@ -796,56 +919,56 @@ TEST_F(MemoryZoneTest, anyWildcard) {
 
 // Test there's nothing in the wildcard in the middle if we load
 // wild.*.foo.example.org.
-TEST_F(MemoryZoneTest, emptyWildcard) {
+TEST_F(InMemoryZoneFinderTest, emptyWildcard) {
     /*
      *            example.org.
      *                foo
      *                 *
      *               wild
      */
-    EXPECT_EQ(SUCCESS, zone_.add(rr_emptywild_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_emptywild_));
 
     {
         SCOPED_TRACE("Asking for the original record under wildcard");
-        findTest(Name("wild.*.foo.example.org"), RRType::A(), Zone::SUCCESS,
-            true, rr_emptywild_);
+        findTest(Name("wild.*.foo.example.org"), RRType::A(),
+                 ZoneFinder::SUCCESS, true, rr_emptywild_);
     }
 
     {
         SCOPED_TRACE("Asking for A record");
-        findTest(Name("a.foo.example.org"), RRType::A(), Zone::NXRRSET);
-        findTest(Name("*.foo.example.org"), RRType::A(), Zone::NXRRSET);
-        findTest(Name("foo.example.org"), RRType::A(), Zone::NXRRSET);
+        findTest(Name("a.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+        findTest(Name("*.foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
+        findTest(Name("foo.example.org"), RRType::A(), ZoneFinder::NXRRSET);
     }
 
     {
         SCOPED_TRACE("Asking for ANY record");
         RRsetList normalTarget;
-        findTest(Name("*.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
-            ConstRRsetPtr(), &normalTarget);
+        findTest(Name("*.foo.example.org"), RRType::ANY(), ZoneFinder::NXRRSET,
+                 true, ConstRRsetPtr(), &normalTarget);
         EXPECT_EQ(0, normalTarget.size());
 
         RRsetList wildTarget;
-        findTest(Name("a.foo.example.org"), RRType::ANY(), Zone::NXRRSET, true,
-            ConstRRsetPtr(), &wildTarget);
+        findTest(Name("a.foo.example.org"), RRType::ANY(),
+                 ZoneFinder::NXRRSET, true, ConstRRsetPtr(), &wildTarget);
         EXPECT_EQ(0, wildTarget.size());
     }
 
     {
         SCOPED_TRACE("Asking on the non-terminal");
         findTest(Name("wild.bar.foo.example.org"), RRType::A(),
-            Zone::NXRRSET);
+            ZoneFinder::NXRRSET);
     }
 }
 
 // Same as emptyWildcard, but with multiple * in the path.
-TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_nested_emptywild_));
+TEST_F(InMemoryZoneFinderTest, nestedEmptyWildcard) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_nested_emptywild_));
 
     {
         SCOPED_TRACE("Asking for the original record under wildcards");
         findTest(Name("wild.*.foo.*.bar.example.org"), RRType::A(),
-            Zone::SUCCESS, true, rr_nested_emptywild_);
+            ZoneFinder::SUCCESS, true, rr_nested_emptywild_);
     }
 
     {
@@ -860,7 +983,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
 
         for (const char** name(names); *name != NULL; ++ name) {
             SCOPED_TRACE(string("Node ") + *name);
-            findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+            findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
         }
     }
 
@@ -878,7 +1001,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
 
         for (const char** name(names); *name != NULL; ++ name) {
             SCOPED_TRACE(string("Node ") + *name);
-            findTest(Name(*name), RRType::A(), Zone::NXRRSET);
+            findTest(Name(*name), RRType::A(), ZoneFinder::NXRRSET);
         }
     }
 
@@ -889,7 +1012,7 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
             SCOPED_TRACE(string("Node ") + *name);
 
             RRsetList target;
-            findTest(Name(*name), RRType::ANY(), Zone::NXRRSET, true,
+            findTest(Name(*name), RRType::ANY(), ZoneFinder::NXRRSET, true,
                 ConstRRsetPtr(), &target);
             EXPECT_EQ(0, target.size());
         }
@@ -899,21 +1022,21 @@ TEST_F(MemoryZoneTest, nestedEmptyWildcard) {
 // We run this part twice from the below test, in two slightly different
 // situations
 void
-MemoryZoneTest::doCancelWildcardTest() {
+InMemoryZoneFinderTest::doCancelWildcardTest() {
     // These should be canceled
     {
         SCOPED_TRACE("Canceled under foo.wild.example.org");
         findTest(Name("aaa.foo.wild.example.org"), RRType::A(),
-            Zone::NXDOMAIN);
+            ZoneFinder::NXDOMAIN);
         findTest(Name("zzz.foo.wild.example.org"), RRType::A(),
-            Zone::NXDOMAIN);
+            ZoneFinder::NXDOMAIN);
     }
 
     // This is existing, non-wildcard domain, shouldn't wildcard at all
     {
         SCOPED_TRACE("Existing domain under foo.wild.example.org");
-        findTest(Name("bar.foo.wild.example.org"), RRType::A(), Zone::SUCCESS,
-            true, rr_not_wild_);
+        findTest(Name("bar.foo.wild.example.org"), RRType::A(),
+                 ZoneFinder::SUCCESS, true, rr_not_wild_);
     }
 
     // These should be caught by the wildcard
@@ -930,15 +1053,16 @@ MemoryZoneTest::doCancelWildcardTest() {
         for (const char** name(names); *name != NULL; ++ name) {
             SCOPED_TRACE(string("Node ") + *name);
 
-            findTest(Name(*name), RRType::A(), Zone::SUCCESS, false, rr_wild_,
-                NULL, NULL, Zone::FIND_DEFAULT, true);
+            findTest(Name(*name), RRType::A(), ZoneFinder::SUCCESS, false,
+                     rr_wild_, NULL, NULL, ZoneFinder::FIND_DEFAULT, true);
         }
     }
 
     // This shouldn't be wildcarded, it's an existing domain
     {
         SCOPED_TRACE("The foo.wild.example.org itself");
-        findTest(Name("foo.wild.example.org"), RRType::A(), Zone::NXRRSET);
+        findTest(Name("foo.wild.example.org"), RRType::A(),
+                 ZoneFinder::NXRRSET);
     }
 }
 
@@ -952,9 +1076,9 @@ MemoryZoneTest::doCancelWildcardTest() {
  * Tests few cases "around" the canceled wildcard match, to see something that
  * shouldn't be canceled isn't.
  */
-TEST_F(MemoryZoneTest, cancelWildcard) {
-    EXPECT_EQ(SUCCESS, zone_.add(rr_wild_));
-    EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_));
+TEST_F(InMemoryZoneFinderTest, cancelWildcard) {
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_wild_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_));
 
     {
         SCOPED_TRACE("Runnig with single entry under foo.wild.example.org");
@@ -964,61 +1088,63 @@ TEST_F(MemoryZoneTest, cancelWildcard) {
     // Try putting another one under foo.wild....
     // The result should be the same but it will be done in another way in the
     // code, because the foo.wild.example.org will exist in the tree.
-    EXPECT_EQ(SUCCESS, zone_.add(rr_not_wild_another_));
+    EXPECT_EQ(SUCCESS, zone_finder_.add(rr_not_wild_another_));
     {
         SCOPED_TRACE("Runnig with two entries under foo.wild.example.org");
         doCancelWildcardTest();
     }
 }
 
-TEST_F(MemoryZoneTest, loadBadWildcard) {
+TEST_F(InMemoryZoneFinderTest, loadBadWildcard) {
     // We reject loading the zone if it contains a wildcard name for
     // NS or DNAME.
-    EXPECT_THROW(zone_.add(rr_nswild_), MemoryZone::AddError);
-    EXPECT_THROW(zone_.add(rr_dnamewild_), MemoryZone::AddError);
+    EXPECT_THROW(zone_finder_.add(rr_nswild_), InMemoryZoneFinder::AddError);
+    EXPECT_THROW(zone_finder_.add(rr_dnamewild_),
+                 InMemoryZoneFinder::AddError);
 }
 
-TEST_F(MemoryZoneTest, swap) {
-    // build one zone with some data
-    MemoryZone zone1(class_, origin_);
-    EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_));
-    EXPECT_EQ(result::SUCCESS, zone1.add(rr_ns_aaaa_));
+TEST_F(InMemoryZoneFinderTest, swap) {
+    // build one zone finder with some data
+    InMemoryZoneFinder finder1(class_, origin_);
+    EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_));
+    EXPECT_EQ(result::SUCCESS, finder1.add(rr_ns_aaaa_));
 
-    // build another zone of a different RR class with some other data
+    // build another zone finder of a different RR class with some other data
     const Name other_origin("version.bind");
     ASSERT_NE(origin_, other_origin); // make sure these two are different
-    MemoryZone zone2(RRClass::CH(), other_origin);
+    InMemoryZoneFinder finder2(RRClass::CH(), other_origin);
     EXPECT_EQ(result::SUCCESS,
-              zone2.add(RRsetPtr(new RRset(Name("version.bind"),
+              finder2.add(RRsetPtr(new RRset(Name("version.bind"),
                                            RRClass::CH(), RRType::TXT(),
                                            RRTTL(0)))));
 
-    zone1.swap(zone2);
-    EXPECT_EQ(other_origin, zone1.getOrigin());
-    EXPECT_EQ(origin_, zone2.getOrigin());
-    EXPECT_EQ(RRClass::CH(), zone1.getClass());
-    EXPECT_EQ(RRClass::IN(), zone2.getClass());
+    finder1.swap(finder2);
+    EXPECT_EQ(other_origin, finder1.getOrigin());
+    EXPECT_EQ(origin_, finder2.getOrigin());
+    EXPECT_EQ(RRClass::CH(), finder1.getClass());
+    EXPECT_EQ(RRClass::IN(), finder2.getClass());
     // make sure the zone data is swapped, too
-    findTest(origin_, RRType::NS(), Zone::NXDOMAIN, false, ConstRRsetPtr(),
-             NULL, &zone1);
-    findTest(other_origin, RRType::TXT(), Zone::SUCCESS, false,
-             ConstRRsetPtr(), NULL, &zone1);
-    findTest(origin_, RRType::NS(), Zone::SUCCESS, false, ConstRRsetPtr(),
-             NULL, &zone2);
-    findTest(other_origin, RRType::TXT(), Zone::NXDOMAIN, false,
-             ConstRRsetPtr(), NULL, &zone2);
+    findTest(origin_, RRType::NS(), ZoneFinder::NXDOMAIN, false,
+             ConstRRsetPtr(), NULL, &finder1);
+    findTest(other_origin, RRType::TXT(), ZoneFinder::SUCCESS, false,
+             ConstRRsetPtr(), NULL, &finder1);
+    findTest(origin_, RRType::NS(), ZoneFinder::SUCCESS, false,
+             ConstRRsetPtr(), NULL, &finder2);
+    findTest(other_origin, RRType::TXT(), ZoneFinder::NXDOMAIN, false,
+             ConstRRsetPtr(), NULL, &finder2);
 }
 
-TEST_F(MemoryZoneTest, getFileName) {
+TEST_F(InMemoryZoneFinderTest, getFileName) {
     // for an empty zone the file name should also be empty.
-    EXPECT_TRUE(zone_.getFileName().empty());
+    EXPECT_TRUE(zone_finder_.getFileName().empty());
 
     // if loading a zone fails the file name shouldn't be set.
-    EXPECT_THROW(zone_.load(TEST_DATA_DIR "/root.zone"), MasterLoadError);
-    EXPECT_TRUE(zone_.getFileName().empty());
+    EXPECT_THROW(zone_finder_.load(TEST_DATA_DIR "/root.zone"),
+                 MasterLoadError);
+    EXPECT_TRUE(zone_finder_.getFileName().empty());
 
     // after a successful load, the specified file name should be set
-    MemoryZone rootzone(class_, Name("."));
+    InMemoryZoneFinder rootzone(class_, Name("."));
     EXPECT_NO_THROW(rootzone.load(TEST_DATA_DIR "/root.zone"));
     EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
     // overriding load, which will fail
@@ -1028,9 +1154,8 @@ TEST_F(MemoryZoneTest, getFileName) {
     EXPECT_EQ(TEST_DATA_DIR "/root.zone", rootzone.getFileName());
 
     // After swap, file names should also be swapped.
-    zone_.swap(rootzone);
-    EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_.getFileName());
+    zone_finder_.swap(rootzone);
+    EXPECT_EQ(TEST_DATA_DIR "/root.zone", zone_finder_.getFileName());
     EXPECT_TRUE(rootzone.getFileName().empty());
 }
-
 }
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
new file mode 100644
index 0000000..61341f6
--- /dev/null
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -0,0 +1,1194 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <vector>
+
+#include <datasrc/sqlite3_accessor.h>
+
+#include <datasrc/data_source.h>
+
+#include <dns/rrclass.h>
+
+#include <gtest/gtest.h>
+#include <boost/lexical_cast.hpp>
+#include <boost/scoped_ptr.hpp>
+#include <fstream>
+#include <sqlite3.h>
+
+using namespace std;
+using namespace isc::datasrc;
+using boost::shared_ptr;
+using boost::lexical_cast;
+using isc::data::ConstElementPtr;
+using isc::data::Element;
+using isc::dns::RRClass;
+using isc::dns::Name;
+
+namespace {
+// Some test data
+std::string SQLITE_DBFILE_EXAMPLE = TEST_DATA_DIR "/test.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE2 = TEST_DATA_DIR "/example2.com.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE2 = "sqlite3_example2.com.sqlite3";
+std::string SQLITE_DBFILE_EXAMPLE_ROOT = TEST_DATA_DIR "/test-root.sqlite3";
+std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
+std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
+std::string SQLITE_DBFILE_MEMORY = ":memory:";
+std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+std::string SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
+
+// The following file must be non-existent and non-"creatable";
+// the sqlite3 library will try to create a new DB file if it doesn't exist,
+// so to test a failure case the create operation should also fail.
+// The "nodir", a non existent directory, is inserted for this purpose.
+std::string SQLITE_DBFILE_NOTEXIST = TEST_DATA_DIR "/nodir/notexist";
+
+// new db file, we don't need this to be a std::string, and given the
+// raw calls we use it in a const char* is more convenient
+const char* SQLITE_NEW_DBFILE = TEST_DATA_BUILDDIR "/newdb.sqlite3";
+
+// Opening works (the content is tested in different tests)
+TEST(SQLite3Open, common) {
+    EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE, "IN"));
+}
+
+// The file can't be opened
+TEST(SQLite3Open, notExist) {
+    EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_NOTEXIST, "IN"),
+                 SQLite3Error);
+}
+
+// It rejects broken DB
+TEST(SQLite3Open, brokenDB) {
+    EXPECT_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_BROKENDB, "IN"),
+                 SQLite3Error);
+}
+
+// Test we can create the schema on the fly
+TEST(SQLite3Open, memoryDB) {
+    EXPECT_NO_THROW(SQLite3Accessor accessor(SQLITE_DBFILE_MEMORY, "IN"));
+}
+
+// Test fixture for querying the db
+class SQLite3AccessorTest : public ::testing::Test {
+public:
+    SQLite3AccessorTest() {
+        initAccessor(SQLITE_DBFILE_EXAMPLE, "IN");
+    }
+    // So it can be re-created with different data
+    void initAccessor(const std::string& filename, const string& rrclass) {
+        accessor.reset(new SQLite3Accessor(filename, rrclass));
+    }
+    // The tested accessor
+    boost::shared_ptr<SQLite3Accessor> accessor;
+};
+
+// This zone exists in the data, so it should be found
+TEST_F(SQLite3AccessorTest, getZone) {
+    std::pair<bool, int> result(accessor->getZone("example.com."));
+    EXPECT_TRUE(result.first);
+    EXPECT_EQ(1, result.second);
+}
+
+// But it should find only the zone, nothing below it
+TEST_F(SQLite3AccessorTest, subZone) {
+    EXPECT_FALSE(accessor->getZone("sub.example.com.").first);
+}
+
+// This zone is not there at all
+TEST_F(SQLite3AccessorTest, noZone) {
+    EXPECT_FALSE(accessor->getZone("example.org.").first);
+}
+
+// This zone is there, but in different class
+TEST_F(SQLite3AccessorTest, noClass) {
+    initAccessor(SQLITE_DBFILE_EXAMPLE, "CH");
+    EXPECT_FALSE(accessor->getZone("example.com.").first);
+}
+
+// Simple check to test that the sequence is valid.  It gets the next record
+// from the iterator, checks that it is not null, then checks the data.
+void checkRR(DatabaseAccessor::IteratorContextPtr& context,
+     std::string name, std::string ttl, std::string type, std::string rdata) {
+
+    // Mark where we are in the text
+    SCOPED_TRACE(name + " " + ttl + " " + type + " " + rdata);
+
+    std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+    // Get next record
+    EXPECT_TRUE(context->getNext(data));
+
+    // ... and check expected values
+    EXPECT_EQ(name, data[DatabaseAccessor::NAME_COLUMN]);
+    EXPECT_EQ(ttl, data[DatabaseAccessor::TTL_COLUMN]);
+    EXPECT_EQ(type, data[DatabaseAccessor::TYPE_COLUMN]);
+    EXPECT_EQ(rdata, data[DatabaseAccessor::RDATA_COLUMN]);
+}
+
+// This tests the iterator context
+TEST_F(SQLite3AccessorTest, iterator) {
+    // Our test zone is conveniently small, but not empty
+    initAccessor(SQLITE_DBFILE_EXAMPLE_ORG, "IN");
+
+    const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+    ASSERT_TRUE(zone_info.first);
+
+    // Get the iterator context
+    DatabaseAccessor::IteratorContextPtr
+        context(accessor->getAllRecords(zone_info.second));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
+
+    std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+    checkRR(context, "example.org.", "3600", "MX", "10 mail.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns1.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns2.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns3.example.org.");
+    checkRR(context, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+    checkRR(context, "dname.example.org.", "3600", "DNAME",
+            "dname.example.info.");
+    checkRR(context, "dname2.foo.example.org.", "3600", "DNAME",
+            "dname2.example.info.");
+    checkRR(context, "mail.example.org.", "3600", "A", "192.0.2.10");
+    checkRR(context, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+    checkRR(context, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+    checkRR(context, "www.example.org.", "3600", "A", "192.0.2.1");
+
+    // Check there's no other
+    EXPECT_FALSE(context->getNext(data));
+
+    // And make sure calling it again won't cause problems.
+    EXPECT_FALSE(context->getNext(data));
+}
+
+// This tests the difference iterator context
+
+// Test that an attempt to create a difference iterator for a serial number
+// that does not exist throws an exception.
+TEST_F(SQLite3AccessorTest, diffIteratorNoRecords) {
+
+    // Our test zone is conveniently small, but not empty
+    initAccessor(SQLITE_DBFILE_DIFFS, "IN");
+
+    const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+    ASSERT_TRUE(zone_info.first);
+
+    // Get the iterator context.  Difference of version 1 does not exist, so
+    // this should throw an exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second, 1, 1234),
+                 isc::datasrc::NoSuchSerial);
+
+    // Check that an invalid high version number also throws an exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second, 1231, 2234),
+                 NoSuchSerial);
+
+    // Check that valid versions - but for the wrong zone which does not hold
+    // any records - also throws this exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second + 42, 1231, 1234),
+                 NoSuchSerial);
+
+}
+
+// Try to iterate through a valid sets of differences
+TEST_F(SQLite3AccessorTest, diffIteratorSequences) {
+    std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+    // Our test zone is conveniently small, but not empty
+    initAccessor(SQLITE_DBFILE_DIFFS, "IN");
+    const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+    ASSERT_TRUE(zone_info.first);
+
+
+    // Check the difference sequence 1230-1231 (two adjacent differences)
+    // Get the iterator context
+    DatabaseAccessor::IteratorContextPtr
+        context1(accessor->getDiffs(zone_info.second, 1230, 1231));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context1);
+
+    // Change: 1230-1231
+    checkRR(context1, "example.org.", "1800", "SOA",
+            "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+    checkRR(context1, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+    // Check there's no other and that calling it again after no records doesn't
+    // cause problems.
+    EXPECT_FALSE(context1->getNext(data));
+    EXPECT_FALSE(context1->getNext(data));
+
+
+    // Check that the difference sequence 1231-1233 (two separate difference
+    // sequences) is OK.
+    DatabaseAccessor::IteratorContextPtr
+        context2(accessor->getDiffs(zone_info.second, 1231, 1233));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+    // Change 1231-1232
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+    checkRR(context2, "unused.example.org.", "3600", "A", "192.0.2.102");
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+    // Change: 1232-1233
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+    checkRR(context2, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+    checkRR(context2, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+
+    // Check there's no other and that calling it again after no records doesn't
+    // cause problems.
+    EXPECT_FALSE(context2->getNext(data));
+    EXPECT_FALSE(context2->getNext(data));
+
+
+    // Check that the difference sequence 4294967280 to 1230 (serial number
+    // rollover) is OK
+    DatabaseAccessor::IteratorContextPtr
+        context3(accessor->getDiffs(zone_info.second, 4294967280U, 1230));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context3);
+
+    // Change 4294967280 to 1230.
+    checkRR(context3, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+    checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.31");
+    checkRR(context3, "example.org.", "1800", "SOA",
+            "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+    checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.21");
+
+    EXPECT_FALSE(context3->getNext(data));
+    EXPECT_FALSE(context3->getNext(data));
+
+
+    // Check the difference sequence 1233-1231 (versions in wrong order).  This
+    // should give an empty difference set.
+    DatabaseAccessor::IteratorContextPtr
+        context4(accessor->getDiffs(zone_info.second, 1233, 1231));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+    EXPECT_FALSE(context4->getNext(data));
+    EXPECT_FALSE(context4->getNext(data));
+}
+
+TEST(SQLite3Open, getDBNameExample2) {
+    SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE2, "IN");
+    EXPECT_EQ(SQLITE_DBNAME_EXAMPLE2, accessor.getDBName());
+}
+
+TEST(SQLite3Open, getDBNameExampleROOT) {
+    SQLite3Accessor accessor(SQLITE_DBFILE_EXAMPLE_ROOT, "IN");
+    EXPECT_EQ(SQLITE_DBNAME_EXAMPLE_ROOT, accessor.getDBName());
+}
+
+// Simple function to match records
+void
+checkRecordRow(const std::string columns[],
+               const std::string& field0,
+               const std::string& field1,
+               const std::string& field2,
+               const std::string& field3,
+               const std::string& field4)
+{
+    EXPECT_EQ(field0, columns[DatabaseAccessor::TYPE_COLUMN]);
+    EXPECT_EQ(field1, columns[DatabaseAccessor::TTL_COLUMN]);
+    EXPECT_EQ(field2, columns[DatabaseAccessor::SIGTYPE_COLUMN]);
+    EXPECT_EQ(field3, columns[DatabaseAccessor::RDATA_COLUMN]);
+    EXPECT_EQ(field4, columns[DatabaseAccessor::NAME_COLUMN]);
+}
+
+TEST_F(SQLite3AccessorTest, getRecords) {
+    const std::pair<bool, int> zone_info(accessor->getZone("example.com."));
+    ASSERT_TRUE(zone_info.first);
+
+    const int zone_id = zone_info.second;
+    ASSERT_EQ(1, zone_id);
+
+    std::string columns[DatabaseAccessor::COLUMN_COUNT];
+
+    DatabaseAccessor::IteratorContextPtr
+        context(accessor->getRecords("foo.bar", 1));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(),
+              context);
+    EXPECT_FALSE(context->getNext(columns));
+    checkRecordRow(columns, "", "", "", "", "");
+
+    // now try some real searches
+    context = accessor->getRecords("foo.example.com.", zone_id);
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "CNAME", "3600", "",
+                   "cnametest.example.org.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "CNAME",
+                   "CNAME 5 3 3600 20100322084538 20100220084538 33495 "
+                   "example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "NSEC", "7200", "",
+                   "mail.example.com. CNAME RRSIG NSEC", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+                   "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+                   "example.com. FAKEFAKEFAKEFAKE", "");
+    EXPECT_FALSE(context->getNext(columns));
+
+    // with no more records, the array should not have been modified
+    checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+                   "NSEC 5 3 7200 20100322084538 20100220084538 33495 "
+                   "example.com. FAKEFAKEFAKEFAKE", "");
+
+    context = accessor->getRecords("example.com.", zone_id);
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "SOA", "3600", "",
+                   "master.example.com. admin.example.com. "
+                   "1234 3600 1800 2419200 7200", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "SOA",
+                   "SOA 5 2 3600 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "NS", "1200", "", "dns01.example.com.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "NS", "3600", "", "dns02.example.com.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "NS", "1800", "", "dns03.example.com.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "NS",
+                   "NS 5 2 3600 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "MX", "3600", "", "10 mail.example.com.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "MX", "3600", "",
+                   "20 mail.subzone.example.com.", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "MX",
+                   "MX 5 2 3600 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "NSEC", "7200", "",
+                   "cname-ext.example.com. NS SOA MX RRSIG NSEC DNSKEY", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "7200", "NSEC",
+                   "NSEC 5 2 7200 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "DNSKEY", "3600", "",
+                   "256 3 5 AwEAAcOUBllYc1hf7ND9uDy+Yz1BF3sI0m4q NGV7W"
+                   "cTD0WEiuV7IjXgHE36fCmS9QsUxSSOV o1I/FMxI2PJVqTYHkX"
+                   "FBS7AzLGsQYMU7UjBZ SotBJ6Imt5pXMu+lEDNy8TOUzG3xm7g"
+                   "0qcbW YF6qCEfvZoBtAqi5Rk7Mlrqs8agxYyMx", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "DNSKEY", "3600", "",
+                   "257 3 5 AwEAAe5WFbxdCPq2jZrZhlMj7oJdff3W7syJ tbvzg"
+                   "62tRx0gkoCDoBI9DPjlOQG0UAbj+xUV 4HQZJStJaZ+fHU5AwV"
+                   "NT+bBZdtV+NujSikhd THb4FYLg2b3Cx9NyJvAVukHp/91HnWu"
+                   "G4T36 CzAFrfPwsHIrBz9BsaIQ21VRkcmj7DswfI/i DGd8j6b"
+                   "qiODyNZYQ+ZrLmF0KIJ2yPN3iO6Zq 23TaOrVTjB7d1a/h31OD"
+                   "fiHAxFHrkY3t3D5J R9Nsl/7fdRmSznwtcSDgLXBoFEYmw6p86"
+                   "Acv RyoYNcL1SXjaKVLG5jyU3UR+LcGZT5t/0xGf oIK/aKwEN"
+                   "rsjcKZZj660b1M=", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+                   "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+                   "4456 example.com. FAKEFAKEFAKEFAKE", "");
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+                   "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+    EXPECT_FALSE(context->getNext(columns));
+    // getNext returning false should mean the array is not altered
+    checkRecordRow(columns, "RRSIG", "3600", "DNSKEY",
+                   "DNSKEY 5 2 3600 20100322084538 20100220084538 "
+                   "33495 example.com. FAKEFAKEFAKEFAKE", "");
+
+    // check that another getNext does not cause problems
+    EXPECT_FALSE(context->getNext(columns));
+
+    // Try searching for subdomain
+    // There's foo.bar.example.com in the data
+    context = accessor->getRecords("bar.example.com.", zone_id, true);
+    ASSERT_TRUE(context->getNext(columns));
+    checkRecordRow(columns, "A", "3600", "", "192.0.2.1", "");
+    EXPECT_FALSE(context->getNext(columns));
+    // But we shouldn't match mix.example.com here
+    context = accessor->getRecords("ix.example.com.", zone_id, true);
+    EXPECT_FALSE(context->getNext(columns));
+}
+
+TEST_F(SQLite3AccessorTest, findPrevious) {
+    EXPECT_EQ("dns01.example.com.",
+              accessor->findPreviousName(1, "com.example.dns02."));
+    // A name that doesn't exist
+    EXPECT_EQ("dns01.example.com.",
+              accessor->findPreviousName(1, "com.example.dns01x."));
+    // Largest name
+    EXPECT_EQ("www.example.com.",
+              accessor->findPreviousName(1, "com.example.wwww"));
+    // Out of zone after the last name
+    EXPECT_EQ("www.example.com.",
+              accessor->findPreviousName(1, "org.example."));
+    // Case insensitive?
+    EXPECT_EQ("dns01.example.com.",
+              accessor->findPreviousName(1, "com.exaMple.DNS02."));
+    // A name that doesn't exist
+    EXPECT_EQ("dns01.example.com.",
+              accessor->findPreviousName(1, "com.exaMple.DNS01X."));
+    // The DB contains foo.bar.example.com., which would be in between
+    // these two names. However, that one does not have an NSEC record,
+    // which is how this database recognizes glue data, so it should
+    // be skipped.
+    EXPECT_EQ("example.com.",
+              accessor->findPreviousName(1, "com.example.cname-ext."));
+    // Throw when we are before the origin
+    EXPECT_THROW(accessor->findPreviousName(1, "com.example."),
+                 isc::NotImplemented);
+    EXPECT_THROW(accessor->findPreviousName(1, "a.example."),
+                 isc::NotImplemented);
+}
+
+TEST_F(SQLite3AccessorTest, findPreviousNoData) {
+    // This one doesn't hold any NSEC records, so it shouldn't work
+    // The underlying DB/data don't support DNSSEC, so it's not implemented
+    // (does it make sense? Or different exception here?)
+    EXPECT_THROW(accessor->findPreviousName(3, "com.example.sql2.www."),
+                 isc::NotImplemented);
+}
+
+// Test fixture for creating a db that automatically deletes it before start,
+// and when done
+class SQLite3Create : public ::testing::Test {
+public:
+    SQLite3Create() {
+        remove(SQLITE_NEW_DBFILE);
+    }
+
+    ~SQLite3Create() {
+        remove(SQLITE_NEW_DBFILE);
+    }
+};
+
+bool isReadable(const char* filename) {
+    return (std::ifstream(filename).is_open());
+}
+
+TEST_F(SQLite3Create, creationtest) {
+    ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+    // Should simply be created
+    SQLite3Accessor accessor(SQLITE_NEW_DBFILE, "IN");
+    ASSERT_TRUE(isReadable(SQLITE_NEW_DBFILE));
+}
+
+TEST_F(SQLite3Create, emptytest) {
+    ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+    // open one manually
+    sqlite3* db;
+    ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+
+    // empty, but not locked, so creating it now should work
+    SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN");
+
+    sqlite3_close(db);
+
+    // should work now that we closed it
+    SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
+}
+
+TEST_F(SQLite3Create, lockedtest) {
+    ASSERT_FALSE(isReadable(SQLITE_NEW_DBFILE));
+
+    // open one manually
+    sqlite3* db;
+    ASSERT_EQ(SQLITE_OK, sqlite3_open(SQLITE_NEW_DBFILE, &db));
+    sqlite3_exec(db, "BEGIN EXCLUSIVE TRANSACTION", NULL, NULL, NULL);
+
+    // should not be able to open it
+    EXPECT_THROW(SQLite3Accessor accessor2(SQLITE_NEW_DBFILE, "IN"),
+                 SQLite3Error);
+
+    sqlite3_exec(db, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
+
+    // should work now that we closed it
+    SQLite3Accessor accessor3(SQLITE_NEW_DBFILE, "IN");
+}
+
+TEST_F(SQLite3AccessorTest, clone) {
+    shared_ptr<DatabaseAccessor> cloned = accessor->clone();
+    EXPECT_EQ(accessor->getDBName(), cloned->getDBName());
+
+    // The cloned accessor should have a separate connection and search
+    // context, so it should be able to perform searches concurrently with
+    // the original accessor.
+    string columns1[DatabaseAccessor::COLUMN_COUNT];
+    string columns2[DatabaseAccessor::COLUMN_COUNT];
+
+    const std::pair<bool, int> zone_info1(
+        accessor->getZone("example.com."));
+    DatabaseAccessor::IteratorContextPtr iterator1 =
+        accessor->getRecords("foo.example.com.", zone_info1.second);
+    const std::pair<bool, int> zone_info2(
+        accessor->getZone("example.com."));
+    DatabaseAccessor::IteratorContextPtr iterator2 =
+        cloned->getRecords("foo.example.com.", zone_info2.second);
+
+    ASSERT_TRUE(iterator1->getNext(columns1));
+    checkRecordRow(columns1, "CNAME", "3600", "", "cnametest.example.org.",
+                   "");
+
+    ASSERT_TRUE(iterator2->getNext(columns2));
+    checkRecordRow(columns2, "CNAME", "3600", "", "cnametest.example.org.",
+                   "");
+}
+
+//
+// Commonly used data for update tests
+//
+const char* const common_expected_data[] = {
+    // Test record already stored in the tested sqlite3 DB file.
+    "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+    "192.0.2.1"
+};
+const char* const new_data[] = {
+    // Newly added data commonly used by some of the tests below
+    "newdata.example.com.", "com.example.newdata.", "3600", "A", "",
+    "192.0.2.1"
+};
+const char* const deleted_data[] = {
+    // Existing data to be removed commonly used by some of the tests below
+    "foo.bar.example.com.", "A", "192.0.2.1"
+};
+
+class SQLite3Update : public SQLite3AccessorTest {
+protected:
+    SQLite3Update() {
+        // Note: if "installing" the test file fails some of the subsequent
+        // tests would fail.
+        const char *install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+                                  "/test.sqlite3 " TEST_DATA_BUILDDIR
+                                  "/test.sqlite3.copied";
+        if (system(install_cmd) != 0) {
+            // any exception will do, this is failure in test setup, but nice
+            // to show the command that fails, and shouldn't be caught
+            isc_throw(isc::Exception,
+                      "Error setting up; command failed: " << install_cmd);
+        };
+        initAccessor(TEST_DATA_BUILDDIR "/test.sqlite3.copied", "IN");
+        zone_id = accessor->getZone("example.com.").second;
+        another_accessor.reset(new SQLite3Accessor(
+                                   TEST_DATA_BUILDDIR "/test.sqlite3.copied",
+                                   "IN"));
+        expected_stored.push_back(common_expected_data);
+    }
+
+    int zone_id;
+    std::string get_columns[DatabaseAccessor::COLUMN_COUNT];
+    std::string add_columns[DatabaseAccessor::ADD_COLUMN_COUNT];
+    std::string del_params[DatabaseAccessor::DEL_PARAM_COUNT];
+    std::string diff_params[DatabaseAccessor::DIFF_PARAM_COUNT];
+
+    vector<const char* const*> expected_stored; // placeholder for checkRecords
+    vector<const char* const*> empty_stored; // indicate no corresponding data
+
+    // Another accessor, emulating one running on a different process/thread
+    shared_ptr<SQLite3Accessor> another_accessor;
+    DatabaseAccessor::IteratorContextPtr iterator;
+};
+
+void
+checkRecords(SQLite3Accessor& accessor, int zone_id, const std::string& name,
+             vector<const char* const*> expected_rows)
+{
+    DatabaseAccessor::IteratorContextPtr iterator =
+        accessor.getRecords(name, zone_id);
+    std::string columns[DatabaseAccessor::COLUMN_COUNT];
+    vector<const char* const*>::const_iterator it = expected_rows.begin();
+    while (iterator->getNext(columns)) {
+        ASSERT_TRUE(it != expected_rows.end());
+        checkRecordRow(columns, (*it)[3], (*it)[2], (*it)[4], (*it)[5], "");
+        ++it;
+    }
+    EXPECT_TRUE(it == expected_rows.end());
+}
+
+TEST_F(SQLite3Update, emptyUpdate) {
+    // If we do nothing between start and commit, the zone content
+    // should be intact.
+
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    accessor->commit();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, flushZone) {
+    // With 'replace' being true startUpdateZone() will flush the existing
+    // zone content.
+
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    zone_id = accessor->startUpdateZone("example.com.", true).second;
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+    accessor->commit();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, readWhileUpdate) {
+    zone_id = accessor->startUpdateZone("example.com.", true).second;
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+    // Until commit is done, the other accessor should see the old data
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 expected_stored);
+
+    // Once the changes are committed, the other accessor will see the new
+    // data.
+    accessor->commit();
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 empty_stored);
+}
+
+TEST_F(SQLite3Update, rollback) {
+    zone_id = accessor->startUpdateZone("example.com.", true).second;
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+    // Rollback will revert the change made by startUpdateZone(, true).
+    accessor->rollback();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, rollbackFailure) {
+    // This test emulates a rare scenario of making rollback attempt fail.
+    // The iterator is paused in the middle of getting records, which prevents
+    // the rollback operation at the end of the test.
+
+    string columns[DatabaseAccessor::COLUMN_COUNT];
+    iterator = accessor->getRecords("example.com.", zone_id);
+    EXPECT_TRUE(iterator->getNext(columns));
+
+    accessor->startUpdateZone("example.com.", true);
+    EXPECT_THROW(accessor->rollback(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitConflict) {
+    // Start reading the DB by another accessor.  We should stop at a single
+    // call to getNextRecord() to keep holding the lock.
+    iterator = another_accessor->getRecords("foo.example.com.", zone_id);
+    EXPECT_TRUE(iterator->getNext(get_columns));
+
+    // Due to getNextRecord() above, the other accessor holds a DB lock,
+    // which will prevent commit.
+    zone_id = accessor->startUpdateZone("example.com.", true).second;
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+    EXPECT_THROW(accessor->commit(), DataSourceError);
+    accessor->rollback();   // rollback should still succeed
+
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, updateConflict) {
+    // Similar to the previous case, but this is a conflict with another
+    // update attempt.  Note that these two accessors modify disjoint sets
+    // of data; sqlite3 only has a coarse-grained lock so we cannot allow
+    // these updates to run concurrently.
+    EXPECT_TRUE(another_accessor->startUpdateZone("sql1.example.com.",
+                                                  true).first);
+    EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+                 DataSourceError);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+    // Once we rollback the other attempt of change, we should be able to
+    // start and commit the transaction using the main accessor.
+    another_accessor->rollback();
+    accessor->startUpdateZone("example.com.", true);
+    accessor->commit();
+}
+
+TEST_F(SQLite3Update, duplicateUpdate) {
+    accessor->startUpdateZone("example.com.", false);
+    EXPECT_THROW(accessor->startUpdateZone("example.com.", false),
+                 DataSourceError);
+}
+
+TEST_F(SQLite3Update, commitWithoutTransaction) {
+    EXPECT_THROW(accessor->commit(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, rollbackWithoutTransaction) {
+    EXPECT_THROW(accessor->rollback(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, addRecord) {
+    // Before update, there should be no record for this name
+    checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+         add_columns);
+    accessor->addRecordToZone(add_columns);
+
+    expected_stored.clear();
+    expected_stored.push_back(new_data);
+    checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+    // Commit the change, and confirm the new data is still there.
+    accessor->commit();
+    checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, addThenRollback) {
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    copy(new_data, new_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+         add_columns);
+    accessor->addRecordToZone(add_columns);
+
+    expected_stored.clear();
+    expected_stored.push_back(new_data);
+    checkRecords(*accessor, zone_id, "newdata.example.com.", expected_stored);
+
+    accessor->rollback();
+    checkRecords(*accessor, zone_id, "newdata.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, duplicateAdd) {
+    const char* const dup_data[] = {
+        "foo.bar.example.com.", "com.example.bar.foo.", "3600", "A", "",
+        "192.0.2.1"
+    };
+    expected_stored.clear();
+    expected_stored.push_back(dup_data);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+    // Adding exactly the same data.  As this backend is "dumb", another
+    // row of the same content will be inserted.
+    copy(dup_data, dup_data + DatabaseAccessor::ADD_COLUMN_COUNT,
+         add_columns);
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    accessor->addRecordToZone(add_columns);
+    expected_stored.push_back(dup_data);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidAdd) {
+    // An attempt of add before an explicit start of transaction
+    EXPECT_THROW(accessor->addRecordToZone(add_columns), DataSourceError);
+}
+
+TEST_F(SQLite3Update, deleteRecord) {
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+    copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    accessor->deleteRecordInZone(del_params);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+    // Commit the change, and confirm the deleted data still isn't there.
+    accessor->commit();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+}
+
+TEST_F(SQLite3Update, deleteThenRollback) {
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    accessor->deleteRecordInZone(del_params);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", empty_stored);
+
+    // Rollback the change, and confirm the data still exists.
+    accessor->rollback();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, deleteNonexistent) {
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+
+    // Replace the name with a non existent one, then try to delete it.
+    // nothing should happen.
+    del_params[DatabaseAccessor::DEL_NAME] = "no-such-name.example.com.";
+    checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+                 empty_stored);
+    accessor->deleteRecordInZone(del_params);
+    checkRecords(*accessor, zone_id, "no-such-name.example.com.",
+                 empty_stored);
+
+    // Name exists but the RR type is different.  The delete attempt
+    // shouldn't remove records by matching the name alone.
+    copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    del_params[DatabaseAccessor::DEL_TYPE] = "AAAA";
+    accessor->deleteRecordInZone(del_params);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+    // Similar to the previous case, but RDATA is different.
+    copy(deleted_data, deleted_data + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    del_params[DatabaseAccessor::DEL_RDATA] = "192.0.2.2";
+    accessor->deleteRecordInZone(del_params);
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, invalidDelete) {
+    // An attempt of delete before an explicit start of transaction
+    EXPECT_THROW(accessor->deleteRecordInZone(del_params), DataSourceError);
+}
+
+TEST_F(SQLite3Update, emptyTransaction) {
+    // A generic transaction without doing anything inside it.  Just check
+    // it doesn't throw or break the database.
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    accessor->startTransaction();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    accessor->commit();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+}
+
+TEST_F(SQLite3Update, duplicateTransaction) {
+    accessor->startTransaction();
+    EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, transactionInUpdate) {
+    accessor->startUpdateZone("example.com.", true);
+    EXPECT_THROW(accessor->startTransaction(), DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateInTransaction) {
+    accessor->startTransaction();
+    EXPECT_THROW(accessor->startUpdateZone("example.com.", true),
+                 DataSourceError);
+}
+
+TEST_F(SQLite3Update, updateWithTransaction) {
+    // Start a read-only transaction, wherein we execute two reads.
+    // Meanwhile we start a write (update) transaction.  The commit attempt
+    // for the write transaction will fail due to the lock held by the read
+    // transaction.  The database should be intact.
+    another_accessor->startTransaction();
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 expected_stored);
+
+    ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+    EXPECT_THROW(accessor->commit(), DataSourceError);
+
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 expected_stored);
+    another_accessor->commit(); // this shouldn't throw
+}
+
+TEST_F(SQLite3Update, updateWithoutTransaction) {
+    // Similar to the previous test, but reads are not protected in a
+    // transaction.  So the write transaction will succeed and flush the DB,
+    // and the result of the second read is different from the first.
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 expected_stored);
+
+    ASSERT_TRUE(accessor->startUpdateZone("example.com.", true).first);
+    accessor->commit();
+
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 empty_stored);
+}
+
+TEST_F(SQLite3Update, concurrentTransactions) {
+    // Two read-only transactions coexist (unlike the read vs write)
+    // Start one transaction.
+    accessor->startTransaction();
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+
+    // Start a new one.
+    another_accessor->startTransaction();
+
+    // The second transaction doesn't affect the first or vice versa.
+    checkRecords(*accessor, zone_id, "foo.bar.example.com.", expected_stored);
+    checkRecords(*another_accessor, zone_id, "foo.bar.example.com.",
+                 expected_stored);
+
+    // Commit should be successful for both transactions.
+    accessor->commit();
+    another_accessor->commit();
+}
+
+//
+// Commonly used data for diff related tests.  The last two entries are
+// a textual representation of "version" and a textual representation of
+// diff operation (either DIFF_ADD_TEXT or DIFF_DELETE_TEXT).  We use this
+// format for the convenience of generating test data and checking the results.
+//
+const char* const DIFF_ADD_TEXT = "0";
+const char* const DIFF_DELETE_TEXT = "1";
+const char* const diff_begin_data[] = {
+    "example.com.", "SOA", "3600",
+    "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+    "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_del_a_data[] = {
+    "dns01.example.com.", "A", "3600", "192.0.2.1", "1234", DIFF_DELETE_TEXT
+};
+const char* const diff_end_data[] = {
+    "example.com.", "SOA", "3600",
+    "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+    "1300", DIFF_ADD_TEXT
+};
+const char* const diff_add_a_data[] = {
+    "dns01.example.com.", "A", "3600", "192.0.2.10", "1234", DIFF_ADD_TEXT
+};
+
+// The following two are helper functions to convert textual test data
+// to integral zone ID and diff operation.
+int
+getVersion(const char* const diff_data[]) {
+    return (lexical_cast<int>(diff_data[DatabaseAccessor::DIFF_PARAM_COUNT]));
+}
+
+DatabaseAccessor::DiffOperation
+getOperation(const char* const diff_data[]) {
+    return (static_cast<DatabaseAccessor::DiffOperation>(
+                lexical_cast<int>(
+                    diff_data[DatabaseAccessor::DIFF_PARAM_COUNT + 1])));
+}
+
+// Common checker function that compares expected and actual sequence of
+// diffs.
+void
+checkDiffs(const vector<const char* const*>& expected,
+           const vector<vector<string> >& actual)
+{
+    EXPECT_EQ(expected.size(), actual.size());
+    const size_t n_diffs = std::min(expected.size(), actual.size());
+    for (size_t i = 0; i < n_diffs; ++i) {
+        for (int j = 0; j < actual[i].size(); ++j) {
+            EXPECT_EQ(expected[i][j], actual[i][j]);
+        }
+    }
+}
+
+TEST_F(SQLite3Update, addRecordDiff) {
+    // A simple case of adding diffs: just changing the SOA, and confirm
+    // the diffs are stored as expected.
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+                            getOperation(diff_begin_data), diff_params);
+
+    copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+                            getOperation(diff_end_data), diff_params);
+
+    // Until the diffs are committed, they are not visible to other accessors.
+    EXPECT_TRUE(another_accessor->getRecordDiff(zone_id).empty());
+
+    accessor->commit();
+
+    expected_stored.clear();
+    expected_stored.push_back(diff_begin_data);
+    expected_stored.push_back(diff_end_data);
+    checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+    // Now it should be visible to others, too.
+    checkDiffs(expected_stored, another_accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addRecordOfLargeSerial) {
+    // This is essentially the same as the previous test, but using a
+    // very large "version" (SOA serial), which is actually the possible
+    // largest value to confirm the internal code doesn't have an overflow bug
+    // or other failure due to the larger value.
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    const char* const begin_data[] = {
+        "example.com.", "SOA", "3600",
+        "ns.example.com. admin.example.com. 4294967295 3600 1800 2419200 7200",
+        "4294967295", DIFF_DELETE_TEXT
+    };
+
+    copy(begin_data, begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    // For "serial" parameter, we intentionally hardcode the value rather
+    // than converting it from the data.
+    accessor->addRecordDiff(zone_id, 0xffffffff, getOperation(diff_begin_data),
+                            diff_params);
+    copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+                            getOperation(diff_end_data), diff_params);
+
+    accessor->commit();
+
+    expected_stored.clear();
+    expected_stored.push_back(begin_data);
+    expected_stored.push_back(diff_end_data);
+    checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithoutUpdate) {
+    // Right now we require startUpdateZone() prior to performing
+    // addRecordDiff.
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+                                         getOperation(diff_begin_data),
+                                         diff_params),
+                 DataSourceError);
+
+    // For now, we don't allow adding diffs in a general transaction either.
+    accessor->startTransaction();
+    EXPECT_THROW(accessor->addRecordDiff(0, getVersion(diff_begin_data),
+                                         getOperation(diff_begin_data),
+                                         diff_params),
+                 DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffWithBadZoneID) {
+    // For now, we require zone ID passed to addRecordDiff be equal to
+    // that for the zone being updated.
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    EXPECT_THROW(accessor->addRecordDiff(zone_id + 1,
+                                         getVersion(diff_begin_data),
+                                         getOperation(diff_begin_data),
+                                         diff_params),
+                 DataSourceError);
+}
+
+TEST_F(SQLite3Update, addDiffRollback) {
+    // Rollback tentatively added diffs.  This is no different from the
+    // update case, but we test it explicitly just in case.
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+                            getOperation(diff_begin_data), diff_params);
+    accessor->rollback();
+
+    EXPECT_TRUE(accessor->getRecordDiff(zone_id).empty());
+}
+
+TEST_F(SQLite3Update, addDiffInBadOrder) {
+    // At this level, the API is naive, and doesn't care if the diff sequence
+    // is a valid IXFR order.
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    // Add diff of 'end', then 'begin'
+    copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+                            getOperation(diff_end_data), diff_params);
+
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+                            getOperation(diff_begin_data), diff_params);
+
+    accessor->commit();
+
+    expected_stored.clear();
+    expected_stored.push_back(diff_end_data);
+    expected_stored.push_back(diff_begin_data);
+    checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithUpdate) {
+    // A more realistic example: add corresponding diffs while updating zone.
+    // Implementation wise, there should be no reason this could fail if
+    // the basic tests so far pass.  But we check it in case we miss something.
+
+    const char* const old_a_record[] = {
+        "dns01.example.com.", "A", "192.0.2.1"
+    };
+    const char* const new_a_record[] = {
+        "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+        "192.0.2.10"
+    };
+    const char* const old_soa_record[] = {
+        "example.com.", "SOA",
+        "ns.example.com. admin.example.com. 1234 3600 1800 2419200 7200",
+    };
+    const char* const new_soa_record[] = {
+        "dns01.example.com.", "com.example.dns01.", "3600", "A", "",
+        "ns.example.com. admin.example.com. 1300 3600 1800 2419200 7200",
+    };
+
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+
+    // Delete SOA (and add that diff)
+    copy(old_soa_record, old_soa_record + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    accessor->deleteRecordInZone(del_params);
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+                            getOperation(diff_begin_data), diff_params);
+
+    // Delete A
+    copy(old_a_record, old_a_record + DatabaseAccessor::DEL_PARAM_COUNT,
+         del_params);
+    accessor->deleteRecordInZone(del_params);
+    copy(diff_del_a_data, diff_del_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_del_a_data),
+                            getOperation(diff_del_a_data), diff_params);
+
+    // Add SOA
+    copy(new_soa_record, new_soa_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+         add_columns);
+    accessor->addRecordToZone(add_columns);
+    copy(diff_end_data, diff_end_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_end_data),
+                            getOperation(diff_end_data), diff_params);
+
+    // Add A
+    copy(new_a_record, new_a_record + DatabaseAccessor::ADD_COLUMN_COUNT,
+         add_columns);
+    accessor->addRecordToZone(add_columns);
+    copy(diff_add_a_data, diff_add_a_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    accessor->addRecordDiff(zone_id, getVersion(diff_add_a_data),
+                            getOperation(diff_add_a_data), diff_params);
+
+    accessor->commit();
+
+    expected_stored.clear();
+    expected_stored.push_back(diff_begin_data);
+    expected_stored.push_back(diff_del_a_data);
+    expected_stored.push_back(diff_end_data);
+    expected_stored.push_back(diff_add_a_data);
+
+    checkDiffs(expected_stored, accessor->getRecordDiff(zone_id));
+}
+
+TEST_F(SQLite3Update, addDiffWithNoTable) {
+    // An attempt of adding diffs to an old version of database that doesn't
+    // have a diffs table.  This will fail in preparing the statement.
+    initAccessor(SQLITE_DBFILE_EXAMPLE + ".nodiffs", "IN");
+    zone_id = accessor->startUpdateZone("example.com.", false).second;
+    copy(diff_begin_data, diff_begin_data + DatabaseAccessor::DIFF_PARAM_COUNT,
+         diff_params);
+    EXPECT_THROW(accessor->addRecordDiff(zone_id, getVersion(diff_begin_data),
+                                         getOperation(diff_begin_data),
+                                         diff_params),
+                 SQLite3Error);
+}
+} // end anonymous namespace
diff --git a/src/lib/datasrc/tests/static_unittest.cc b/src/lib/datasrc/tests/static_unittest.cc
index a11e889..4c9fe42 100644
--- a/src/lib/datasrc/tests/static_unittest.cc
+++ b/src/lib/datasrc/tests/static_unittest.cc
@@ -53,6 +53,7 @@ protected:
 
         // NOTE: in addition, the order of the following items matter.
         authors_data.push_back("Chen Zhengzhang");
+        authors_data.push_back("Dmitriy Volodin");
         authors_data.push_back("Evan Hunt");
         authors_data.push_back("Haidong Wang");
         authors_data.push_back("Han Feng");
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
new file mode 100644
index 0000000..6a35fe3
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -0,0 +1 @@
+CLEANFILES = *.copied
diff --git a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 b/src/lib/datasrc/tests/testdata/brokendb.sqlite3
index 7aad3af..63f3cc5 100644
Binary files a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 and b/src/lib/datasrc/tests/testdata/brokendb.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs.sqlite3 b/src/lib/datasrc/tests/testdata/diffs.sqlite3
new file mode 100644
index 0000000..3820563
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/diffs.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs_table.sql b/src/lib/datasrc/tests/testdata/diffs_table.sql
new file mode 100644
index 0000000..0e05207
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/diffs_table.sql
@@ -0,0 +1,123 @@
+-- Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+--
+-- Permission to use, copy, modify, and/or distribute this software for any
+-- purpose with or without fee is hereby granted, provided that the above
+-- copyright notice and this permission notice appear in all copies.
+--
+-- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+-- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+-- AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+-- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+-- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+-- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+-- PERFORMANCE OF THIS SOFTWARE.
+
+-- \brief Create Differences Table
+--
+-- This is a short-term solution to creating the differences table for testing
+-- purposes.
+--
+-- It is assumed that the database used is a copy of the "example.org.sqlite3"
+-- database in this test directory.  The diffs table is created and populated
+-- with a set of RRs that purport to represent differences that end in the
+-- zone as is.
+--
+-- The file can be executed by the command:
+-- % sqlite3 -init <this-file> <database-file> ".quit"
+--
+-- The file gets executed as the set of SQL statements on the database file,
+-- the ".quit" on the command line then being executed to exit SQLite3.
+
+-- Create the diffs table
+DROP TABLE diffs;
+CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    version INTEGER NOT NULL,
+                    operation INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rrtype STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdata STRING NOT NULL);
+
+-- Populate it.  A dummy zone_id is used for now - this will be updated last of
+-- all.
+
+-- Change from 4294967280 (0xfffffff0) to 1230 to show serial rollover
+-- Update one record in the zone.
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 4294967280,  1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 4294967280, 1, "www.example.org.", "A", 3600, "192.0.2.31");
+
+-- Records added in version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 0, "example.org.", "SOA", 1800,
+           "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 0, "www.example.org.", "A", 3600, "192.0.2.21");
+
+-- Change 1230 to 1231: Change a parameter of the SOA record
+-- Records removed from version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 1, "example.org.", "SOA", 1800,
+           "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+
+-- Records added in version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+
+-- Change 1231 to 1232: Remove one record, don't add anything.
+-- Records removed from version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 1, "unused.example.org.", "A", 3600, "192.0.2.102");
+
+-- Records added in version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1232, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Change 1232 to 1233: Add two, don't remove anything.
+-- Records removed from version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1232, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Records added in version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "sub.example.org.", "NS", 3600, "ns.sub.example.org.");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "ns.sub.example.org.", "A", 3600, "192.0.2.101");
+
+
+-- Change 1233 to 1234: change addresses of two A records
+-- Records removed from version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "www.example.org.", "A", 3600, "192.0.2.21");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "mail.example.org.", "A", 3600, "192.0.2.210");
+
+-- Records added in version 1234 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "www.example.org.", "A", 3600, "192.0.2.1");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "mail.example.org.", "A", 3600, "192.0.2.10");
+
+-- Finally, update the zone_id in the diffs table with what is actually
+-- in the zone table.
+UPDATE diffs SET zone_id =
+   (SELECT id FROM ZONES LIMIT 1);
diff --git a/src/lib/datasrc/tests/testdata/example.org.sqlite3 b/src/lib/datasrc/tests/testdata/example.org.sqlite3
index 070012f..60e6e05 100644
Binary files a/src/lib/datasrc/tests/testdata/example.org.sqlite3 and b/src/lib/datasrc/tests/testdata/example.org.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 b/src/lib/datasrc/tests/testdata/example2.com.sqlite3
index 8d3bb34..9da7d0e 100644
Binary files a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 and b/src/lib/datasrc/tests/testdata/example2.com.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
new file mode 100644
index 0000000..ccbb884
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test-root.sqlite3 b/src/lib/datasrc/tests/testdata/test-root.sqlite3
index 7cc6195..c1dae47 100644
Binary files a/src/lib/datasrc/tests/testdata/test-root.sqlite3 and b/src/lib/datasrc/tests/testdata/test-root.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3 b/src/lib/datasrc/tests/testdata/test.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/datasrc/tests/testdata/test.sqlite3 and b/src/lib/datasrc/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/datasrc/tests/zonetable_unittest.cc b/src/lib/datasrc/tests/zonetable_unittest.cc
index a117176..fa74c0e 100644
--- a/src/lib/datasrc/tests/zonetable_unittest.cc
+++ b/src/lib/datasrc/tests/zonetable_unittest.cc
@@ -18,7 +18,7 @@
 #include <dns/rrclass.h>
 
 #include <datasrc/zonetable.h>
-// We use MemoryZone to put something into the table
+// We use InMemoryZone to put something into the table
 #include <datasrc/memory_datasrc.h>
 
 #include <gtest/gtest.h>
@@ -28,31 +28,32 @@ using namespace isc::datasrc;
 
 namespace {
 TEST(ZoneTest, init) {
-    MemoryZone zone(RRClass::IN(), Name("example.com"));
+    InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
     EXPECT_EQ(Name("example.com"), zone.getOrigin());
     EXPECT_EQ(RRClass::IN(), zone.getClass());
 
-    MemoryZone ch_zone(RRClass::CH(), Name("example"));
+    InMemoryZoneFinder ch_zone(RRClass::CH(), Name("example"));
     EXPECT_EQ(Name("example"), ch_zone.getOrigin());
     EXPECT_EQ(RRClass::CH(), ch_zone.getClass());
 }
 
 TEST(ZoneTest, find) {
-    MemoryZone zone(RRClass::IN(), Name("example.com"));
-    EXPECT_EQ(Zone::NXDOMAIN,
+    InMemoryZoneFinder zone(RRClass::IN(), Name("example.com"));
+    EXPECT_EQ(ZoneFinder::NXDOMAIN,
               zone.find(Name("www.example.com"), RRType::A()).code);
 }
 
 class ZoneTableTest : public ::testing::Test {
 protected:
-    ZoneTableTest() : zone1(new MemoryZone(RRClass::IN(),
-                                           Name("example.com"))),
-                      zone2(new MemoryZone(RRClass::IN(),
-                                           Name("example.net"))),
-                      zone3(new MemoryZone(RRClass::IN(), Name("example")))
+    ZoneTableTest() : zone1(new InMemoryZoneFinder(RRClass::IN(),
+                                                   Name("example.com"))),
+                      zone2(new InMemoryZoneFinder(RRClass::IN(),
+                                                   Name("example.net"))),
+                      zone3(new InMemoryZoneFinder(RRClass::IN(),
+                                                   Name("example")))
     {}
     ZoneTable zone_table;
-    ZonePtr zone1, zone2, zone3;
+    ZoneFinderPtr zone1, zone2, zone3;
 };
 
 TEST_F(ZoneTableTest, addZone) {
@@ -60,7 +61,8 @@ TEST_F(ZoneTableTest, addZone) {
     EXPECT_EQ(result::EXIST, zone_table.addZone(zone1));
     // names are compared in a case insensitive manner.
     EXPECT_EQ(result::EXIST, zone_table.addZone(
-                  ZonePtr(new MemoryZone(RRClass::IN(), Name("EXAMPLE.COM")))));
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::IN(),
+                                                       Name("EXAMPLE.COM")))));
 
     EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone2));
     EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone3));
@@ -68,11 +70,11 @@ TEST_F(ZoneTableTest, addZone) {
     // Zone table is indexed only by name.  Duplicate origin name with
     // different zone class isn't allowed.
     EXPECT_EQ(result::EXIST, zone_table.addZone(
-                  ZonePtr(new MemoryZone(RRClass::CH(),
-                                         Name("example.com")))));
+                  ZoneFinderPtr(new InMemoryZoneFinder(RRClass::CH(),
+                                                       Name("example.com")))));
 
     /// Bogus zone (NULL)
-    EXPECT_THROW(zone_table.addZone(ZonePtr()), isc::InvalidParameter);
+    EXPECT_THROW(zone_table.addZone(ZoneFinderPtr()), isc::InvalidParameter);
 }
 
 TEST_F(ZoneTableTest, DISABLED_removeZone) {
@@ -95,7 +97,7 @@ TEST_F(ZoneTableTest, findZone) {
 
     EXPECT_EQ(result::NOTFOUND,
               zone_table.findZone(Name("example.org")).code);
-    EXPECT_EQ(ConstZonePtr(),
+    EXPECT_EQ(ConstZoneFinderPtr(),
               zone_table.findZone(Name("example.org")).zone);
 
     // there's no exact match.  the result should be the longest match,
@@ -107,7 +109,7 @@ TEST_F(ZoneTableTest, findZone) {
 
     // make sure the partial match is indeed the longest match by adding
     // a zone with a shorter origin and query again.
-    ZonePtr zone_com(new MemoryZone(RRClass::IN(), Name("com")));
+    ZoneFinderPtr zone_com(new InMemoryZoneFinder(RRClass::IN(), Name("com")));
     EXPECT_EQ(result::SUCCESS, zone_table.addZone(zone_com));
     EXPECT_EQ(Name("example.com"),
               zone_table.findZone(Name("www.example.com")).zone->getOrigin());
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index 1252c94..9fcd289 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -15,59 +15,128 @@
 #ifndef __ZONE_H
 #define __ZONE_H 1
 
-#include <datasrc/result.h>
+#include <dns/rrset.h>
 #include <dns/rrsetlist.h>
 
+#include <datasrc/result.h>
+
 namespace isc {
 namespace datasrc {
 
-/// \brief The base class for a single authoritative zone
+/// \brief The base class to search a zone for RRsets
 ///
-/// The \c Zone class is an abstract base class for representing
-/// a DNS zone as part of data source.
+/// The \c ZoneFinder class is an abstract base class for representing
+/// an object that performs DNS lookups in a specific zone accessible via
+/// a data source.  In general, different types of data sources (in-memory,
+/// database-based, etc) define their own derived classes of \c ZoneFinder,
+/// implementing ways to retrieve the required data through the common
+/// interfaces declared in the base class.  Each concrete \c ZoneFinder
+/// object is therefore (conceptually) associated with a specific zone
+/// of one specific data source instance.
 ///
-/// At the moment this is provided mainly for making the \c ZoneTable class
-/// and the authoritative query logic  testable, and only provides a minimal
-/// set of features.
-/// This is why this class is defined in the same header file, but it may
-/// have to move to a separate header file when we understand what is
-/// necessary for this class for actual operation.
+/// The origin name and the RR class of the associated zone are available
+/// via the \c getOrigin() and \c getClass() methods, respectively.
 ///
-/// The idea is to provide a specific derived zone class for each data
-/// source, beginning with in memory one.  At that point the derived classes
-/// will have more specific features.  For example, they will maintain
-/// information about the location of a zone file, whether it's loaded in
-/// memory, etc.
+/// The most important method of this class is \c find(), which performs
+/// the lookup for a given domain and type.  See the description of the
+/// method for details.
 ///
-/// It's not yet clear how the derived zone classes work with various other
-/// data sources when we integrate these components, but one possibility is
-/// something like this:
-/// - If the underlying database such as some variant of SQL doesn't have an
-///   explicit representation of zones (as part of public interface), we can
-///   probably use a "default" zone class that simply encapsulates the
-///   corresponding data source and calls a common "find" like method.
-/// - Some data source may want to specialize it by inheritance as an
-///   optimization.  For example, in the current schema design of the sqlite3
-///   data source, its (derived) zone class would contain the information of
-///   the "zone ID".
-///
-/// <b>Note:</b> Unlike some other abstract base classes we don't name the
-/// class beginning with "Abstract".  This is because we want to have
-/// commonly used definitions such as \c Result and \c ZonePtr, and we want
-/// to make them look more intuitive.
-class Zone {
+/// \note It's not clear whether we should request that a zone finder form a
+/// "transaction", that is, whether to ensure the finder is not susceptible
+/// to changes made by someone else than the creator of the finder.  If we
+/// don't request that, for example, two different lookup results for the
+/// same name and type can be different if other threads or programs make
+/// updates to the zone between the lookups.  We should revisit this point
+/// as we gain more experiences.
+class ZoneFinder {
 public:
     /// Result codes of the \c find() method.
     ///
     /// Note: the codes are tentative.  We may need more, or we may find
     /// some of them unnecessary as we implement more details.
+    ///
+    /// Some are synonyms of others in terms of RCODE returned to user.
+    /// But they help the logic to decide if it should ask for a NSEC
+    /// that covers something or not (for example, in case of NXRRSET,
+    /// the directly returned NSEC is sufficient, but with wildcard one,
+    /// we need to add one proving there's no exact match and this is
+    /// actually the best wildcard we have). Data sources that don't
+    /// support DNSSEC don't need to distinguish them.
+    ///
+    /// In case of CNAME, if the CNAME is a wildcard (i.e., its owner name
+    /// starts with the label "*"), WILDCARD_CNAME will be returned instead
+    /// of CNAME.
+    ///
+    /// In case of NXDOMAIN, the returned NSEC covers the queried domain
+    /// that proves that the query name does not exist in the zone.  Note that
+    /// this does not necessarily prove it doesn't even match a wildcard
+    /// (even if the result of NXDOMAIN can only happen when there's no
+    /// matching wildcard either).  It is caller's responsibility to provide
+    /// a proof that there is no matching wildcard if that proof is necessary.
+    ///
+    /// Various variants of "no data" cases are complicated, when involves
+    /// DNSSEC and wildcard processing.  Referring to Section 3.1.3 of
+    /// RFC4035, we need to consider the following cases:
+    /// -# (Normal) no data: there is a matching non-wildcard name with a
+    ///    different RR type.  This is the "No Data" case of the RFC.
+    /// -# (Normal) empty non terminal: there is no matching (exact or
+    ///    wildcard) name, but there is a subdomain with an RR of the query
+    ///    name.  This is one case of "Name Error" of the RFC.
+    /// -# Wildcard empty non terminal: similar to case 2, but the empty name
+    ///    is a wildcard, and matches the query name by wildcard expansion.
+    ///    This is a special case of "Name Error" of the RFC.
+    /// -# Wildcard no data: there is no exact match name, but there is a
+    ///    wildcard name that matches the query name with a different type
+    ///    of RR.  This is the "Wildcard No Data" case of the RFC.
+    ///
+    /// In any case, \c find() will result in \c NXRRSET with no RRset
+    /// unless the \c FIND_DNSSEC option is specified.  The rest of the
+    /// discussion only applies to the case where this option is specified.
+    ///
+    /// In case 1, \c find() will result in NXRRSET, and return NSEC of the
+    /// matching name.
+    ///
+    /// In case 2, \c find() will result in NXRRSET, and return NSEC for the
+    /// interval where the empty nonterminal lives. The end of the interval
+    /// is the subdomain causing existence of the empty nonterminal (if
+    /// there's sub.x.example.com, and no record in x.example.com, then
+    /// x.example.com exists implicitly - is the empty nonterminal and
+    /// sub.x.example.com is the subdomain causing it).  Note that this NSEC
+    /// proves not only the existence of empty non terminal name but also
+    /// the non existence of possibly matching wildcard name, because
+    /// there can be no better wildcard match than the exact matching empty
+    /// name.
+    ///
+    /// In case 3, \c find() will result in WILDCARD_NXRRSET, and return NSEC
+    /// for the interval where the wildcard empty nonterminal lives.
+    /// Cases 2 and 3 are especially complicated and confusing.  See the
+    /// examples below.
+    ///
+    /// In case 4, \c find() will result in WILDCARD_NXRRSET, and return
+    /// NSEC of the matching wildcard name.
+    ///
+    /// Examples: if zone "example.com" has the following record:
+    /// \code
+    /// a.example.com. NSEC a.b.example.com.
+    /// \endcode
+    /// a call to \c find() for "b.example.com." with the FIND_DNSSEC option
+    /// will result in NXRRSET, and this NSEC will be returned.
+    /// Likewise, if zone "example.org" has the following record,
+    /// \code
+    /// a.example.org. NSEC x.*.b.example.org.
+    /// \endcode
+    /// a call to \c find() for "y.b.example.org" with FIND_DNSSEC will
+    /// result in WILDCARD_NXRRSET, and this NSEC will be returned.
     enum Result {
         SUCCESS,                ///< An exact match is found.
         DELEGATION,             ///< The search encounters a zone cut.
         NXDOMAIN, ///< There is no domain name that matches the search name
         NXRRSET,  ///< There is a matching name but no RRset of the search type
         CNAME,    ///< The search encounters and returns a CNAME RR
-        DNAME     ///< The search encounters and returns a DNAME RR
+        DNAME,    ///< The search encounters and returns a DNAME RR
+        WILDCARD, ///< Success by wildcard match, for DNSSEC
+        WILDCARD_CNAME, ///< CNAME on wildcard, search returns CNAME, for DNSSEC
+        WILDCARD_NXRRSET ///< NXRRSET on wildcard, for DNSSEC
     };
 
     /// A helper structure to represent the search result of \c find().
@@ -107,7 +176,12 @@ public:
     /// performed on these values to express compound options.
     enum FindOptions {
         FIND_DEFAULT = 0,       ///< The default options
-        FIND_GLUE_OK = 1        ///< Allow search under a zone cut
+        FIND_GLUE_OK = 1,       ///< Allow search under a zone cut
+        FIND_DNSSEC = 2,        ///< Require DNSSEC data in the answer
+                                ///< (RRSIG, NSEC, etc.). The implementation
+                                ///< is allowed to include it even if it is
+                                ///< not set.
+        NO_WILDCARD = 4         ///< Do not try wildcard matching.
     };
 
     ///
@@ -119,10 +193,10 @@ protected:
     ///
     /// This is intentionally defined as \c protected as this base class should
     /// never be instantiated (except as part of a derived class).
-    Zone() {}
+    ZoneFinder() {}
 public:
     /// The destructor.
-    virtual ~Zone() {}
+    virtual ~ZoneFinder() {}
     //@}
 
     ///
@@ -131,14 +205,14 @@ public:
     /// These methods should never throw an exception.
     //@{
     /// Return the origin name of the zone.
-    virtual const isc::dns::Name& getOrigin() const = 0;
+    virtual isc::dns::Name getOrigin() const = 0;
 
     /// Return the RR class of the zone.
-    virtual const isc::dns::RRClass& getClass() const = 0;
+    virtual isc::dns::RRClass getClass() const = 0;
     //@}
 
     ///
-    /// \name Search Method
+    /// \name Search Methods
     ///
     //@{
     /// Search the zone for a given pair of domain name and RR type.
@@ -147,6 +221,7 @@ public:
     /// for the data that best matches the given name and type.
     /// This method is expected to be "intelligent", and identifies the
     /// best possible answer for the search key.  Specifically,
+    ///
     /// - If the search name belongs under a zone cut, it returns the code
     ///   of \c DELEGATION and the NS RRset at the zone cut.
     /// - If there is no matching name, it returns the code of \c NXDOMAIN,
@@ -165,13 +240,15 @@ public:
     /// - If the target isn't NULL, all RRsets under the domain are inserted
     ///   there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned
    ///   instead of normal processing. This is intended to handle ANY query.
-    ///   \note: this behavior is controversial as we discussed in
-    ///   https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html
-    ///   We should revisit the interface before we heavily rely on it.
+    ///
+    /// \note This behavior is controversial as we discussed in
+    /// https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html
+    /// We should revisit the interface before we heavily rely on it.
     ///
     /// The \c options parameter specifies customized behavior of the search.
-    /// Their semantics is as follows:
-    /// - \c GLUE_OK Allow search under a zone cut.  By default the search
+    /// Their semantics is as follows (they are bit-field options that can be OR'ed together):
+    ///
+    /// - \c FIND_GLUE_OK Allow search under a zone cut.  By default the search
     ///   will stop once it encounters a zone cut.  If this option is specified
     ///   it remembers information about the highest zone cut and continues
     ///   the search until it finds an exact match for the given name or it
@@ -179,6 +256,13 @@ public:
     ///   RRsets for that name are searched just like the normal case;
     ///   otherwise, if the search has encountered a zone cut, \c DELEGATION
     ///   with the information of the highest zone cut will be returned.
+    /// - \c FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are
+    ///   returned with the answer. It is allowed for the data source to
+    ///   include them even when not requested.
+    /// - \c NO_WILDCARD Do not try wildcard matching.  This option is of no
+    ///   use for normal lookups; it's intended to be used to get a DNSSEC
+    ///   proof of the non existence of any matching wildcard or non existence
+    ///   of an exact match when a wildcard match is found.
     ///
     /// A derived version of this method may involve internal resource
     /// allocation, especially for constructing the resulting RRset, and may
@@ -197,18 +281,379 @@ public:
                             const isc::dns::RRType& type,
                             isc::dns::RRsetList* target = NULL,
                             const FindOptions options
-                            = FIND_DEFAULT) const = 0;
+                            = FIND_DEFAULT) = 0;
+
+    /// \brief Get previous name in the zone
+    ///
+    /// Gets the previous name in the DNSSEC order. This can be used
+    /// to find the correct NSEC records for proving nonexistence
+    /// of domains.
+    ///
+    /// The concrete implementation might throw anything it thinks appropriate,
+    /// however it is recommended to stick to the ones listed here. The user
+    /// of this method should be able to handle any exceptions.
+    ///
+    /// This method does not include under-zone-cut data (glue data).
+    ///
+    /// \param query The name for which one we look for a previous one. The
+    ///     queried name doesn't have to exist in the zone.
+    /// \return The preceding name
+    ///
+    /// \throw NotImplemented in case the data source backend doesn't support
+    ///     DNSSEC or there is no previous name in the zone (NSEC records might
+    ///     be missing in the DB, or the queried name is less than or equal to
+    ///     the apex).
+    /// \throw DataSourceError for low-level or internal datasource errors
+    ///     (like broken connection to database, wrong data living there).
+    /// \throw std::bad_alloc For allocation errors.
+    virtual isc::dns::Name findPreviousName(const isc::dns::Name& query)
+        const = 0;
     //@}
 };
 
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<Zone> ZonePtr;
+/// \brief Operator to combine FindOptions
+///
+/// We would need to manually static-cast the options if we put or
+/// between them, which is undesired with bit-flag options. Therefore
+/// we hide the cast here, which is the simplest solution and it still
+/// provides reasonable level of type safety.
+inline ZoneFinder::FindOptions operator |(ZoneFinder::FindOptions a,
+                                          ZoneFinder::FindOptions b)
+{
+    return (static_cast<ZoneFinder::FindOptions>(static_cast<unsigned>(a) |
+                                                 static_cast<unsigned>(b)));
+}
 
-/// \brief A pointer-like type pointing to a \c Zone object.
-typedef boost::shared_ptr<const Zone> ConstZonePtr;
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<ZoneFinder> ZoneFinderPtr;
 
-}
-}
+/// \brief A pointer-like type pointing to a \c ZoneFinder object.
+typedef boost::shared_ptr<const ZoneFinder> ConstZoneFinderPtr;
+
+/// The base class to make updates to a single zone.
+///
+/// On construction, each derived class object will start a "transaction"
+/// for making updates to a specific zone (this means a constructor of
+/// a derived class would normally take parameters to identify the zone
+/// to be updated).  The underlying realization of a "transaction" will differ
+/// for different derived classes; if it uses a general purpose database
+/// as a backend, it will involve performing some form of "begin transaction"
+/// statement for the database.
+///
+/// Updates (adding or deleting RRs) are made via \c addRRset() and
+/// \c deleteRRset() methods.  Until the \c commit() method is called the
+/// changes are local to the updater object.  For example, they won't be
+/// visible via a \c ZoneFinder object except the one returned by the
+/// updater's own \c getFinder() method.  The \c commit() completes the
+/// transaction and makes the changes visible to others.
+///
+/// This class does not provide an explicit "rollback" interface.  If
+/// something wrong or unexpected happens during the updates and the
+/// caller wants to cancel the intermediate updates, the caller should
+/// simply destruct the updater object without calling \c commit().
+/// The destructor is supposed to perform the "rollback" operation,
+/// depending on the internal details of the derived class.
+///
+/// \note This initial implementation provides a quite simple interface of
+/// adding and deleting RRs (see the description of the related methods).
+/// It may be revisited as we gain more experiences.
+class ZoneUpdater {
+protected:
+    /// The default constructor.
+    ///
+    /// This is intentionally defined as protected to ensure that this base
+    /// class is never instantiated directly.
+    ZoneUpdater() {}
+
+public:
+    /// The destructor
+    ///
+    /// Each derived class implementation must ensure that if \c commit()
+    /// has not been performed by the time of the call to it, then it
+    /// "rollbacks" the updates made via the updater so far.
+    virtual ~ZoneUpdater() {}
+
+    /// Return a finder for the zone being updated.
+    ///
+    /// The returned finder provides the functionalities of \c ZoneFinder
+    /// for the zone as updates are made via the updater.  That is, before
+    /// making any update, the finder will be able to find all RRsets that
+    /// exist in the zone at the time the updater is created.  If RRsets
+    /// are added or deleted via \c addRRset() or \c deleteRRset(),
+    /// this finder will find the added ones or miss the deleted ones
+    /// respectively.
+    ///
+    /// The finder returned by this method is effective only while the updates
+    /// are performed, i.e., from the construction of the corresponding
+    /// updater until \c commit() is performed or the updater is destructed
+    /// without commit.  The result of a subsequent call to this method (or
+    /// the use of the result) after that is undefined.
+    ///
+    /// \return A reference to a \c ZoneFinder for the updated zone
+    virtual ZoneFinder& getFinder() = 0;
+
+    /// Add an RRset to a zone via the updater
+    ///
+    /// This may be revisited in a future version, but right now the intended
+    /// behavior of this method is simple: It "naively" adds the specified
+    /// RRset to the zone specified on creation of the updater.
+    /// It performs minimum level of validation on the specified RRset:
+    /// - Whether the RR class is identical to that for the zone to be updated
+    /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+    /// - Whether the RRset is not associated with an RRSIG, i.e.,
+    ///   whether \c getRRsig() on the RRset returns a NULL pointer.
+    ///
+    /// and otherwise does not check any oddity.  For example, it doesn't
+    /// check whether the owner name of the specified RRset is a subdomain
+    /// of the zone's origin; it doesn't care whether or not there is already
+    /// an RRset of the same name and RR type in the zone, and if there is,
+    /// whether any of the existing RRs have duplicate RDATA with the added
+    /// ones.  If these conditions matter the calling application must examine
+    /// the existing data beforehand using the \c ZoneFinder returned by
+    /// \c getFinder().
+    ///
+    /// The validation requirement on the associated RRSIG is temporary.
+    /// If we find it more reasonable and useful to allow adding a pair of
+    /// RRset and its RRSIG RRset as we gain experiences with the interface,
+    /// we may remove this restriction.  Until then we explicitly check it
+    /// to prevent accidental misuse.
+    ///
+    /// Conceptually, on successful call to this method, the zone will have
+    /// the specified RRset, and if there is already an RRset of the same
+    /// name and RR type, these two sets will be "merged".  "Merged" means
+    /// that a subsequent call to \c ZoneFinder::find() for the name and type
+    /// will result in success and the returned RRset will contain all
+    /// previously existing and newly added RDATAs with the TTL being the
+    /// minimum of the two RRsets.  The underlying representation of the
+    /// "merged" RRsets may vary depending on the characteristic of the
+    /// underlying data source.  For example, if it uses a general purpose
+    /// database that stores each RR of the same RRset separately, it may
+    /// simply be a larger sets of RRs based on both the existing and added
+    /// RRsets; the TTLs of the RRs may be different within the database, and
+    /// there may even be duplicate RRs in different database rows.  As long
+    /// as the RRset returned via \c ZoneFinder::find() conforms to the
+    /// concept of "merge", the actual internal representation is up to the
+    /// implementation.
+    ///
+    /// This method must not be called once commit() is performed.  If it
+    /// calls after \c commit() the implementation must throw a
+    /// \c DataSourceError exception.
+    ///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to add the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+    ///
+    /// \todo As noted above we may have to revisit the design details as we
+    /// gain experiences:
+    ///
+    /// - we may want to check (and maybe reject) if there is already a
+    /// duplicate RR (that has the same RDATA).
+    /// - we may want to check (and maybe reject) if there is already an
+    /// RRset of the same name and RR type with different TTL
+    /// - we may even want to check if there is already any RRset of the
+    /// same name and RR type.
+    /// - we may want to add an "options" parameter that can control the
+    /// above points
+    /// - we may want to have this method return a value containing the
+    /// information on whether there's a duplicate, etc.
+    ///
+    /// \exception DataSourceError Called after \c commit(), RRset is invalid
+    /// (see above), internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the current RRset
+    ///   doesn't fit into the IXFR sequence (see above).
+    /// \exception std::bad_alloc Resource allocation failure
+    ///
+    /// \param rrset The RRset to be added
+    virtual void addRRset(const isc::dns::RRset& rrset) = 0;
+
+    /// Delete an RRset from a zone via the updater
+    ///
+    /// Like \c addRRset(), the detailed semantics and behavior of this method
+    /// may have to be revisited in a future version.  The following are
+    /// based on the initial implementation decisions.
+    ///
+    /// On successful completion of this method, it will remove from the zone
+    /// the RRs of the specified owner name and RR type that match one of
+    /// the RDATAs of the specified RRset.  There are several points to be
+    /// noted:
+    /// - Existing RRs that don't match any of the specified RDATAs will
+    ///   remain in the zone.
+    /// - Any RRs of the specified RRset that doesn't exist in the zone will
+    ///   simply be ignored; the implementation of this method is not supposed
+    ///   to check that condition.
+    /// - The TTL of the RRset is ignored; matching is only performed by
+    ///   the owner name, RR type and RDATA
+    ///
+    /// Ignoring the TTL may not look sensible, but it's based on the
+    /// observation that it will result in more intuitive result, especially
+    /// when the underlying data source is a general purpose database.
+    /// See also \c DatabaseAccessor::deleteRecordInZone() on this point.
+    /// It also matches the dynamic update protocol (RFC2136), where TTLs
+    /// are ignored when deleting RRs.
+    ///
+    /// \note Since the TTL is ignored, this method could take the RRset
+    /// to be deleted as a tuple of name, RR type, and a list of RDATAs.
+    /// But in practice, it's quite likely that the caller has the RRset
+    /// in the form of the \c RRset object (e.g., extracted from a dynamic
+    /// update request message), so this interface would rather be more
+    /// convenient.  If it turns out not to be true we can change or extend
+    /// the method signature.
+    ///
+    /// This method performs minimum level of validation on the specified
+    /// RRset:
+    /// - Whether the RR class is identical to that for the zone to be updated
+    /// - Whether the RRset is not empty, i.e., it has at least one RDATA
+    /// - Whether the RRset is not associated with an RRSIG, i.e.,
+    ///   whether \c getRRsig() on the RRset returns a NULL pointer.
+    ///
+    /// This method must not be called once commit() is performed.  If it
+    /// calls after \c commit() the implementation must throw a
+    /// \c DataSourceError exception.
+    ///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to delete the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+    ///
+    /// \todo As noted above we may have to revisit the design details as we
+    /// gain experiences:
+    ///
+    /// - we may want to check (and maybe reject) if some or all of the RRs
+    ///   for the specified RRset don't exist in the zone
+    /// - we may want to allow an option to "delete everything" for specified
+    ///   name and/or specified name + RR type.
+    /// - as mentioned above, we may want to include the TTL in matching the
+    ///   deleted RRs
+    /// - we may want to add an "options" parameter that can control the
+    ///   above points
+    /// - we may want to have this method return a value containing the
+    ///   information on whether there are any RRs that are specified but don't
+    ///   exist, the number of actually deleted RRs, etc.
+    ///
+    /// \exception DataSourceError Called after \c commit(), RRset is invalid
+    /// (see above), internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the current RRset
+    ///   doesn't fit into the IXFR sequence (see above).
+    /// \exception std::bad_alloc Resource allocation failure
+    ///
+    /// \param rrset The RRset to be deleted
+    virtual void deleteRRset(const isc::dns::RRset& rrset) = 0;
+
+    /// Commit the updates made in the updater to the zone
+    ///
+    /// This method completes the "transaction" started at the creation
+    /// of the updater.  After successful completion of this method, the
+    /// updates will be visible outside the scope of the updater.
+    /// The actual internal behavior will differ for different derived classes.
+    /// For a derived class with a general purpose database as a backend,
+    /// for example, this method would perform a "commit" statement for the
+    /// database.
+    ///
+    /// This operation can only be performed at most once.  A duplicate call
+    /// must result in a DataSourceError exception.
+    ///
+    /// \exception DataSourceError Duplicate call of the method,
+    /// internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the update is not
+    ///    complete IXFR sequence.
+    virtual void commit() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneUpdater object.
+typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
+
+/// The base class for retrieving differences between two versions of a zone.
+///
+/// On construction, each derived class object will internally set up
+/// retrieving sequences of differences between two specific version of
+/// a specific zone managed in a particular data source.  So the constructor
+/// of a derived class would normally take parameters to identify the zone
+/// and the two versions for which the differences should be retrieved.
+/// See \c DataSourceClient::getJournalReader for more concrete details
+/// used in this API.
+///
+/// Once constructed, an object of this class will act like an iterator
+/// over the sequences.  Every time the \c getNextDiff() method is called
+/// it returns one element of the differences in the form of an \c RRset
+/// until it reaches the end of the entire sequences.
+class ZoneJournalReader {
+public:
+    /// Result codes used by a factory method for \c ZoneJournalReader
+    enum Result {
+        SUCCESS, ///< A \c ZoneJournalReader object successfully created
+        NO_SUCH_ZONE, ///< Specified zone does not exist in the data source
+        NO_SUCH_VERSION ///< Specified versions do not exist in the diff storage
+    };
+
+protected:
+    /// The default constructor.
+    ///
+    /// This is intentionally defined as protected to ensure that this base
+    /// class is never instantiated directly.
+    ZoneJournalReader() {}
+
+public:
+    /// The destructor
+    virtual ~ZoneJournalReader() {}
+
+    /// Return the next difference RR of difference sequences.
+    ///
+    /// In this API, the difference between two versions of a zone is
+    /// conceptually represented as IXFR-style difference sequences:
+    /// Each difference sequence is a sequence of RRs: an older version of
+    /// SOA (to be deleted), zero or more other deleted RRs, the
+    /// post-transaction SOA (to be added), and zero or more other
+    /// added RRs.  (Note, however, that the underlying data source
+    /// implementation may or may not represent the difference in
+    /// straightforward realization of this concept.  The mapping between
+    /// the conceptual difference and the actual implementation is hidden
+    /// in each derived class).
+    ///
+    /// This method provides an application with a higher level interface
+    /// to retrieve the difference along with the conceptual model: the
+    /// \c ZoneJournalReader object iterates over the entire sequences
+    /// from the beginning SOA (which is to be deleted) up to the ending
+    /// SOA (which is to be added), and each call to this method returns
+    /// one RR in the form of an \c RRset that contains exactly one RDATA
+    /// in the order of the sequences.
+    ///
+    /// Note that the ordering of the sequences specifies the semantics of
+    /// each difference: add or delete.  For example, the first RR is to
+    /// be deleted, and the last RR is to be added.  So the return value
+    /// of this method does not explicitly indicate whether the RR is to be
+    /// added or deleted.
+    ///
+    /// This method ensures the returned \c RRset represents an RR, that is,
+    /// it contains exactly one RDATA.  However, it does not necessarily
+    /// ensure that the resulting sequences are in the form of IXFR-style.
+    /// For example, the first RR is supposed to be an SOA, and it should
+    /// normally be the case, but this interface does not necessarily require
+    /// the derived class implementation ensure this.  Normally the
+    /// differences are expected to be stored using this API (via a
+    /// \c ZoneUpdater object), and as long as that is the case and the
+    /// underlying implementation follows the requirement of the API, the
+    /// result of this method should be a valid IXFR-style sequences.
+    /// So this API does not mandate the almost redundant check as part of
+    /// the interface.  If the application needs to make it sure 100%, it
+    /// must check the resulting sequence itself.
+    ///
+    /// Once the object reaches the end of the sequences, this method returns
+    /// \c Null.  Any subsequent call will result in an exception of
+    /// class \c InvalidOperation.
+    ///
+    /// \exception InvalidOperation The method is called beyond the end of
+    /// the difference sequences.
+    /// \exception DataSourceError Underlying data is broken and the RR
+    /// cannot be created or other low level data source error.
+    ///
+    /// \return An \c RRset that contains one RDATA corresponding to the
+    /// next difference in the sequences.
+    virtual isc::dns::ConstRRsetPtr getNextDiff() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneJournalReader object.
+typedef boost::shared_ptr<ZoneJournalReader> ZoneJournalReaderPtr;
+
+} // end of datasrc
+} // end of isc
 
 #endif  // __ZONE_H
 
diff --git a/src/lib/datasrc/zonetable.cc b/src/lib/datasrc/zonetable.cc
index bc09286..644861c 100644
--- a/src/lib/datasrc/zonetable.cc
+++ b/src/lib/datasrc/zonetable.cc
@@ -28,8 +28,8 @@ namespace datasrc {
 /// \short Private data and implementation of ZoneTable
 struct ZoneTable::ZoneTableImpl {
     // Type aliases to make it shorter
-    typedef RBTree<Zone> ZoneTree;
-    typedef RBNode<Zone> ZoneNode;
+    typedef RBTree<ZoneFinder> ZoneTree;
+    typedef RBNode<ZoneFinder> ZoneNode;
     // The actual storage
     ZoneTree zones_;
 
@@ -40,7 +40,7 @@ struct ZoneTable::ZoneTableImpl {
      */
 
     // Implementation of ZoneTable::addZone
-    result::Result addZone(ZonePtr zone) {
+    result::Result addZone(ZoneFinderPtr zone) {
         // Sanity check
         if (!zone) {
             isc_throw(InvalidParameter,
@@ -85,12 +85,12 @@ struct ZoneTable::ZoneTableImpl {
                 break;
             // We have no data there, so translate the pointer to NULL as well
             case ZoneTree::NOTFOUND:
-                return (FindResult(result::NOTFOUND, ZonePtr()));
+                return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
             // Can Not Happen
             default:
                 assert(0);
                 // Because of warning
-                return (FindResult(result::NOTFOUND, ZonePtr()));
+                return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
         }
 
         // Can Not Happen (remember, NOTFOUND is handled)
@@ -108,7 +108,7 @@ ZoneTable::~ZoneTable() {
 }
 
 result::Result
-ZoneTable::addZone(ZonePtr zone) {
+ZoneTable::addZone(ZoneFinderPtr zone) {
     return (impl_->addZone(zone));
 }
 
diff --git a/src/lib/datasrc/zonetable.h b/src/lib/datasrc/zonetable.h
index 5b873d1..5a34480 100644
--- a/src/lib/datasrc/zonetable.h
+++ b/src/lib/datasrc/zonetable.h
@@ -41,11 +41,11 @@ namespace datasrc {
 class ZoneTable {
 public:
     struct FindResult {
-        FindResult(result::Result param_code, const ZonePtr param_zone) :
+        FindResult(result::Result param_code, const ZoneFinderPtr param_zone) :
             code(param_code), zone(param_zone)
         {}
         const result::Result code;
-        const ZonePtr zone;
+        const ZoneFinderPtr zone;
     };
     ///
     /// \name Constructors and Destructor.
@@ -83,7 +83,7 @@ public:
     /// added to the zone table.
     /// \return \c result::EXIST The zone table already contains
     /// zone of the same origin.
-    result::Result addZone(ZonePtr zone);
+    result::Result addZone(ZoneFinderPtr zone);
 
     /// Remove a \c Zone of the given origin name from the \c ZoneTable.
     ///
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
new file mode 100644
index 0000000..3991033
--- /dev/null
+++ b/src/lib/dhcp/Makefile.am
@@ -0,0 +1,27 @@
+SUBDIRS = . tests
+
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+CLEANFILES = *.gcno *.gcda
+
+lib_LTLIBRARIES = libdhcp.la
+libdhcp_la_SOURCES  =
+libdhcp_la_SOURCES += libdhcp.cc libdhcp.h
+libdhcp_la_SOURCES += option.cc option.h
+libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
+libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
+libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp_la_SOURCES += option4_addrlst.cc option4_addrlst.h
+libdhcp_la_SOURCES += dhcp6.h dhcp4.h
+libdhcp_la_SOURCES += pkt6.cc pkt6.h
+libdhcp_la_SOURCES += pkt4.cc pkt4.h
+
+EXTRA_DIST  = README
+#EXTRA_DIST += log_messages.mes
+
+libdhcp_la_CXXFLAGS = $(AM_CXXFLAGS)
+libdhcp_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
+libdhcp_la_LIBADD   = $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/dhcp/README b/src/lib/dhcp/README
new file mode 100644
index 0000000..6c5353d
--- /dev/null
+++ b/src/lib/dhcp/README
@@ -0,0 +1,11 @@
+This directory holds implementation for libdhcp.
+
+
+Basic Ideas
+===========
+
+
+Notes
+=====
+This work has just begun. Don't expect to see much useful code here.
+We are working on it.
\ No newline at end of file
diff --git a/src/lib/dhcp/dhcp4.h b/src/lib/dhcp/dhcp4.h
new file mode 100644
index 0000000..98381ac
--- /dev/null
+++ b/src/lib/dhcp/dhcp4.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2004-2011 by Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (c) 1995-2003 by Internet Software Consortium
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ *   Internet Systems Consortium, Inc.
+ *   950 Charter Street
+ *   Redwood City, CA 94063
+ *   <info at isc.org>
+ *   https://www.isc.org/
+ *
+ * This software has been written for Internet Systems Consortium
+ * by Ted Lemon in cooperation with Vixie Enterprises.  To learn more
+ * about Internet Systems Consortium, see ``https://www.isc.org''.
+ * To learn more about Vixie Enterprises, see ``http://www.vix.com''.
+ */
+
+/*
+ * NOTE: This files is imported from ISC DHCP. It uses C notation.
+ *       Format kept for easier merge.
+ */
+
+#ifndef DHCP_H
+#define DHCP_H
+
+#include <stdint.h>
+
+namespace isc {
+namespace dhcp {
+
+/* BOOTP (rfc951) message types */
+enum BOOTPTypes {
+    BOOTREQUEST = 1,
+    BOOTREPLY = 2
+};
+
+/* Possible values for flags field... */
+static const uint16_t BOOTP_BROADCAST = 32768L;
+
+/* Possible values for hardware type (htype) field... */
+enum HType {
+    HTYPE_ETHER = 1,   /* Ethernet 10Mbps */
+    HTYPE_IEEE802 = 6, /* IEEE 802.2 Token Ring */
+    HTYPE_FDDI = 8     /* FDDI */
+    /// TODO Add infiniband here
+};
+
+/* DHCP Option codes: */
+enum DHCPOptionType {
+    DHO_PAD                          = 0,
+    DHO_SUBNET_MASK                  = 1,
+    DHO_TIME_OFFSET                  = 2,
+    DHO_ROUTERS                      = 3,
+    DHO_TIME_SERVERS                 = 4,
+    DHO_NAME_SERVERS                 = 5,
+    DHO_DOMAIN_NAME_SERVERS          = 6,
+    DHO_LOG_SERVERS                  = 7,
+    DHO_COOKIE_SERVERS               = 8,
+    DHO_LPR_SERVERS                  = 9,
+    DHO_IMPRESS_SERVERS              = 10,
+    DHO_RESOURCE_LOCATION_SERVERS    = 11,
+    DHO_HOST_NAME                    = 12,
+    DHO_BOOT_SIZE                    = 13,
+    DHO_MERIT_DUMP                   = 14,
+    DHO_DOMAIN_NAME                  = 15,
+    DHO_SWAP_SERVER                  = 16,
+    DHO_ROOT_PATH                    = 17,
+    DHO_EXTENSIONS_PATH              = 18,
+    DHO_IP_FORWARDING                = 19,
+    DHO_NON_LOCAL_SOURCE_ROUTING     = 20,
+    DHO_POLICY_FILTER                = 21,
+    DHO_MAX_DGRAM_REASSEMBLY         = 22,
+    DHO_DEFAULT_IP_TTL               = 23,
+    DHO_PATH_MTU_AGING_TIMEOUT       = 24,
+    DHO_PATH_MTU_PLATEAU_TABLE       = 25,
+    DHO_INTERFACE_MTU                = 26,
+    DHO_ALL_SUBNETS_LOCAL            = 27,
+    DHO_BROADCAST_ADDRESS            = 28,
+    DHO_PERFORM_MASK_DISCOVERY       = 29,
+    DHO_MASK_SUPPLIER                = 30,
+    DHO_ROUTER_DISCOVERY             = 31,
+    DHO_ROUTER_SOLICITATION_ADDRESS  = 32,
+    DHO_STATIC_ROUTES                = 33,
+    DHO_TRAILER_ENCAPSULATION        = 34,
+    DHO_ARP_CACHE_TIMEOUT            = 35,
+    DHO_IEEE802_3_ENCAPSULATION      = 36,
+    DHO_DEFAULT_TCP_TTL              = 37,
+    DHO_TCP_KEEPALIVE_INTERVAL       = 38,
+    DHO_TCP_KEEPALIVE_GARBAGE        = 39,
+    DHO_NIS_DOMAIN                   = 40,
+    DHO_NIS_SERVERS                  = 41,
+    DHO_NTP_SERVERS                  = 42,
+    DHO_VENDOR_ENCAPSULATED_OPTIONS  = 43,
+    DHO_NETBIOS_NAME_SERVERS         = 44,
+    DHO_NETBIOS_DD_SERVER            = 45,
+    DHO_NETBIOS_NODE_TYPE            = 46,
+    DHO_NETBIOS_SCOPE                = 47,
+    DHO_FONT_SERVERS                 = 48,
+    DHO_X_DISPLAY_MANAGER            = 49,
+    DHO_DHCP_REQUESTED_ADDRESS       = 50,
+    DHO_DHCP_LEASE_TIME              = 51,
+    DHO_DHCP_OPTION_OVERLOAD         = 52,
+    DHO_DHCP_MESSAGE_TYPE            = 53,
+    DHO_DHCP_SERVER_IDENTIFIER       = 54,
+    DHO_DHCP_PARAMETER_REQUEST_LIST  = 55,
+    DHO_DHCP_MESSAGE                 = 56,
+    DHO_DHCP_MAX_MESSAGE_SIZE        = 57,
+    DHO_DHCP_RENEWAL_TIME            = 58,
+    DHO_DHCP_REBINDING_TIME          = 59,
+    DHO_VENDOR_CLASS_IDENTIFIER      = 60,
+    DHO_DHCP_CLIENT_IDENTIFIER       = 61,
+    DHO_NWIP_DOMAIN_NAME             = 62,
+    DHO_NWIP_SUBOPTIONS              = 63,
+    DHO_USER_CLASS                   = 77,
+    DHO_FQDN                         = 81,
+    DHO_DHCP_AGENT_OPTIONS           = 82,
+    DHO_AUTHENTICATE                 = 90,  /* RFC3118, was 210 */
+    DHO_CLIENT_LAST_TRANSACTION_TIME = 91,
+    DHO_ASSOCIATED_IP                = 92,
+    DHO_SUBNET_SELECTION             = 118, /* RFC3011! */
+    DHO_DOMAIN_SEARCH                = 119, /* RFC3397 */
+    DHO_VIVCO_SUBOPTIONS             = 124,
+    DHO_VIVSO_SUBOPTIONS             = 125,
+
+    DHO_END                          = 255
+};
+
+/* DHCP message types. */
+enum DHCPMessageType {
+    DHCPDISCOVER        =  1,
+    DHCPOFFER           =  2,
+    DHCPREQUEST         =  3,
+    DHCPDECLINE         =  4,
+    DHCPACK             =  5,
+    DHCPNAK             =  6,
+    DHCPRELEASE         =  7,
+    DHCPINFORM          =  8,
+    DHCPLEASEQUERY      =  10,
+    DHCPLEASEUNASSIGNED =  11,
+    DHCPLEASEUNKNOWN    =  12,
+    DHCPLEASEACTIVE     =  13
+};
+
+static const uint16_t DHCP4_CLIENT_PORT = 68;
+static const uint16_t DHCP4_SERVER_PORT = 67;
+
+/// Magic cookie validating dhcp options field (and bootp vendor
+/// extensions field).
+///static const char* DHCP_OPTIONS_COOKIE = "\143\202\123\143";
+
+// TODO: Following are leftovers from dhcp.h import from ISC DHCP
+// They will be converted to C++-style defines once they will start
+// to be used.
+#if 0
+/* Relay Agent Information option subtypes: */
+#define RAI_CIRCUIT_ID  1
+#define RAI_REMOTE_ID   2
+#define RAI_AGENT_ID    3
+#define RAI_LINK_SELECT 5
+
+/* FQDN suboptions: */
+#define FQDN_NO_CLIENT_UPDATE           1
+#define FQDN_SERVER_UPDATE              2
+#define FQDN_ENCODED                    3
+#define FQDN_RCODE1                     4
+#define FQDN_RCODE2                     5
+#define FQDN_HOSTNAME                   6
+#define FQDN_DOMAINNAME                 7
+#define FQDN_FQDN                       8
+#define FQDN_SUBOPTION_COUNT            8
+
+/* Enterprise Suboptions: */
+#define VENDOR_ISC_SUBOPTIONS           2495
+
+#endif
+
+} // end of isc::dhcp namespace
+} // end of isc namespace
+
+#endif /* DHCP_H */
diff --git a/src/lib/dhcp/dhcp6.h b/src/lib/dhcp/dhcp6.h
new file mode 100644
index 0000000..6012003
--- /dev/null
+++ b/src/lib/dhcp/dhcp6.h
@@ -0,0 +1,184 @@
+// Copyright (C) 2006-2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef DHCP6_H
+#define DHCP6_H
+
+/* DHCPv6 Option codes: */
+
+#define D6O_CLIENTID                            1 /* RFC3315 */
+#define D6O_SERVERID                            2
+#define D6O_IA_NA                               3
+#define D6O_IA_TA                               4
+#define D6O_IAADDR                              5
+#define D6O_ORO                                 6
+#define D6O_PREFERENCE                          7
+#define D6O_ELAPSED_TIME                        8
+#define D6O_RELAY_MSG                           9
+/* Option code 10 unassigned. */
+#define D6O_AUTH                                11
+#define D6O_UNICAST                             12
+#define D6O_STATUS_CODE                         13
+#define D6O_RAPID_COMMIT                        14
+#define D6O_USER_CLASS                          15
+#define D6O_VENDOR_CLASS                        16
+#define D6O_VENDOR_OPTS                         17
+#define D6O_INTERFACE_ID                        18
+#define D6O_RECONF_MSG                          19
+#define D6O_RECONF_ACCEPT                       20
+#define D6O_SIP_SERVERS_DNS                     21 /* RFC3319 */
+#define D6O_SIP_SERVERS_ADDR                    22 /* RFC3319 */
+#define D6O_NAME_SERVERS                        23 /* RFC3646 */
+#define D6O_DOMAIN_SEARCH                       24 /* RFC3646 */
+#define D6O_IA_PD                               25 /* RFC3633 */
+#define D6O_IAPREFIX                            26 /* RFC3633 */
+#define D6O_NIS_SERVERS                         27 /* RFC3898 */
+#define D6O_NISP_SERVERS                        28 /* RFC3898 */
+#define D6O_NIS_DOMAIN_NAME                     29 /* RFC3898 */
+#define D6O_NISP_DOMAIN_NAME                    30 /* RFC3898 */
+#define D6O_SNTP_SERVERS                        31 /* RFC4075 */
+#define D6O_INFORMATION_REFRESH_TIME            32 /* RFC4242 */
+#define D6O_BCMCS_SERVER_D                      33 /* RFC4280 */
+#define D6O_BCMCS_SERVER_A                      34 /* RFC4280 */
+/* 35 is unassigned */
+#define D6O_GEOCONF_CIVIC                       36 /* RFC4776 */
+#define D6O_REMOTE_ID                           37 /* RFC4649 */
+#define D6O_SUBSCRIBER_ID                       38 /* RFC4580 */
+#define D6O_CLIENT_FQDN                         39 /* RFC4704 */
+#define D6O_PANA_AGENT                          40 /* paa-option */
+#define D6O_NEW_POSIX_TIMEZONE                  41 /* RFC4833 */
+#define D6O_NEW_TZDB_TIMEZONE                   42 /* RFC4833 */
+#define D6O_ERO                                 43 /* RFC4994 */
+#define D6O_LQ_QUERY                            44 /* RFC5007 */
+#define D6O_CLIENT_DATA                         45 /* RFC5007 */
+#define D6O_CLT_TIME                            46 /* RFC5007 */
+#define D6O_LQ_RELAY_DATA                       47 /* RFC5007 */
+#define D6O_LQ_CLIENT_LINK                      48 /* RFC5007 */
+
+/*
+ * Status Codes, from RFC 3315 section 24.4, and RFC 3633, 5007.
+ */
+#define STATUS_Success           0
+#define STATUS_UnspecFail        1
+#define STATUS_NoAddrsAvail      2
+#define STATUS_NoBinding         3
+#define STATUS_NotOnLink         4
+#define STATUS_UseMulticast      5
+#define STATUS_NoPrefixAvail     6
+#define STATUS_UnknownQueryType  7
+#define STATUS_MalformedQuery    8
+#define STATUS_NotConfigured     9
+#define STATUS_NotAllowed       10
+
+/*
+ * DHCPv6 message types, defined in section 5.3 of RFC 3315
+ */
+#define DHCPV6_SOLICIT              1
+#define DHCPV6_ADVERTISE            2
+#define DHCPV6_REQUEST              3
+#define DHCPV6_CONFIRM              4
+#define DHCPV6_RENEW                5
+#define DHCPV6_REBIND               6
+#define DHCPV6_REPLY                7
+#define DHCPV6_RELEASE              8
+#define DHCPV6_DECLINE              9
+#define DHCPV6_RECONFIGURE         10
+#define DHCPV6_INFORMATION_REQUEST 11
+#define DHCPV6_RELAY_FORW          12
+#define DHCPV6_RELAY_REPL          13
+#define DHCPV6_LEASEQUERY          14
+#define DHCPV6_LEASEQUERY_REPLY    15
+
+extern const char *dhcpv6_type_names[];
+extern const int dhcpv6_type_name_max;
+
+/* DUID type definitions (RFC3315 section 9).
+ */
+#define DUID_LLT        1
+#define DUID_EN         2
+#define DUID_LL         3
+
+/* Offsets into IA_*'s where Option spaces commence.  */
+#define IA_NA_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+#define IA_TA_OFFSET  4 /* IAID only, 4 octets */
+#define IA_PD_OFFSET 12 /* IAID, T1, T2, all 4 octets each */
+
+/* Offset into IAADDR's where Option spaces commence. */
+#define IAADDR_OFFSET 24
+
+/* Offset into IAPREFIX's where Option spaces commence. */
+#define IAPREFIX_OFFSET 25
+
+/* Offset into LQ_QUERY's where Option spaces commence. */
+#define LQ_QUERY_OFFSET 17
+
+/*
+ * DHCPv6 well-known multicast addresses, from section 5.1 of RFC 3315
+ */
+#define ALL_DHCP_RELAY_AGENTS_AND_SERVERS "ff02::1:2"
+#define ALL_DHCP_SERVERS "ff05::1:3"
+
+#define DHCP6_CLIENT_PORT 546
+#define DHCP6_SERVER_PORT 547
+
+/*
+ * DHCPv6 Retransmission Constants (RFC3315 section 5.5, RFC 5007)
+ */
+
+#define SOL_MAX_DELAY     1
+#define SOL_TIMEOUT       1
+#define SOL_MAX_RT      120
+#define REQ_TIMEOUT       1
+#define REQ_MAX_RT       30
+#define REQ_MAX_RC       10
+#define CNF_MAX_DELAY     1
+#define CNF_TIMEOUT       1
+#define CNF_MAX_RT        4
+#define CNF_MAX_RD       10
+#define REN_TIMEOUT      10
+#define REN_MAX_RT      600
+#define REB_TIMEOUT      10
+#define REB_MAX_RT      600
+#define INF_MAX_DELAY     1
+#define INF_TIMEOUT       1
+#define INF_MAX_RT      120
+#define REL_TIMEOUT       1
+#define REL_MAX_RC        5
+#define DEC_TIMEOUT       1
+#define DEC_MAX_RC        5
+#define REC_TIMEOUT       2
+#define REC_MAX_RC        8
+#define HOP_COUNT_LIMIT  32
+#define LQ6_TIMEOUT       1
+#define LQ6_MAX_RT       10
+#define LQ6_MAX_RC        5
+
+/* Leasequery query-types (RFC 5007) */
+
+#define LQ6QT_BY_ADDRESS        1
+#define LQ6QT_BY_CLIENTID       2
+
+/*
+ * DUID time starts 2000-01-01.
+ * This constant is the number of seconds since 1970-01-01,
+ * when the Unix epoch began.
+ */
+#define DUID_TIME_EPOCH 946684800
+
+/* Information-Request Time option (RFC 4242) */
+
+#define IRT_DEFAULT     86400
+#define IRT_MINIMUM     600
+
+#endif
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
new file mode 100644
index 0000000..f84e495
--- /dev/null
+++ b/src/lib/dhcp/libdhcp.cc
@@ -0,0 +1,180 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
+#include "config.h"
+#include <dhcp/dhcp4.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+// static array with factories for options
+std::map<unsigned short, Option::Factory*> LibDHCP::v6factories_;
+
+unsigned int
+LibDHCP::unpackOptions6(const boost::shared_array<uint8_t> buf,
+                        unsigned int buf_len,
+                        unsigned int offset, unsigned int parse_len,
+                        isc::dhcp::Option::OptionCollection& options) {
+    if (offset + parse_len > buf_len) {
+        isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+                  << parse_len << " bytes at offset " << offset
+                  << ":  out of buffer");
+    }
+    unsigned int end = offset + parse_len;
+
+    while (offset +4 <= end) {
+        uint16_t opt_type = buf[offset]*256 + buf[offset+1];
+        offset += 2;
+        uint16_t opt_len = buf[offset]*256 + buf[offset+1];
+        offset += 2;
+
+        if (offset + opt_len > end ) {
+            cout << "Option " << opt_type << " truncated." << endl;
+            return (offset);
+        }
+        boost::shared_ptr<Option> opt;
+        switch (opt_type) {
+        case D6O_IA_NA:
+        case D6O_IA_PD:
+            // cout << "Creating Option6IA" << endl;
+            opt = boost::shared_ptr<Option>(new Option6IA(opt_type,
+                                                          buf, buf_len,
+                                                          offset,
+                                                          opt_len));
+            break;
+        case D6O_IAADDR:
+            // cout << "Creating Option6IAAddr" << endl;
+            opt = boost::shared_ptr<Option>(new Option6IAAddr(opt_type,
+                                                              buf, buf_len,
+                                                              offset, opt_len));
+            break;
+        default:
+            // cout << "Creating Option" << endl;
+            opt = boost::shared_ptr<Option>(new Option(Option::V6,
+                                                       opt_type,
+                                                       buf,
+                                                       offset,
+                                                       opt_len));
+            break;
+        }
+        // add option to options
+        options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+        offset += opt_len;
+    }
+
+    return (offset);
+}
+
+void
+LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
+                        isc::dhcp::Option::OptionCollection& options) {
+    size_t offset = 0;
+
+    // 2 - header of DHCPv4 option
+    while (offset + 1 <= buf.size()) {
+        uint8_t opt_type = buf[offset++];
+        if (offset + 1 == buf.size()) {
+            if (opt_type == DHO_END)
+                return; // just return. Don't need to add DHO_END option
+            else {
+                isc_throw(OutOfRange, "Attempt to parse truncated option "
+                          << opt_type);
+            }
+        }
+
+        uint8_t opt_len =  buf[offset++];
+        if (offset + opt_len > buf.size() ) {
+            isc_throw(OutOfRange, "Option parse failed. Tried to parse "
+                      << offset + opt_len << " bytes from " << buf.size()
+                      << "-byte long buffer.");
+        }
+
+        boost::shared_ptr<Option> opt;
+        switch(opt_type) {
+        default:
+            opt = boost::shared_ptr<Option>(new Option(Option::V4, opt_type,
+                                                       buf.begin()+offset,
+                                                       buf.begin()+offset+opt_len));
+        }
+
+        options.insert(pair<int, boost::shared_ptr<Option> >(opt_type, opt));
+        offset += opt_len;
+    }
+}
+
+unsigned int
+LibDHCP::packOptions6(boost::shared_array<uint8_t> data,
+                      unsigned int data_len,
+                      unsigned int offset,
+                      const isc::dhcp::Option::OptionCollection& options) {
+    try {
+        for (Option::OptionCollection::const_iterator it = options.begin();
+             it != options.end();
+             ++it) {
+            unsigned short opt_len = (*it).second->len();
+            if (offset + opt_len > data_len) {
+                isc_throw(OutOfRange, "Failed to build option " <<
+                          (*it).first << ": out of buffer");
+            }
+            offset = it->second->pack(data, data_len, offset);
+        }
+    }
+    catch (const Exception& e) {
+        cout << "Packet build failed (Option build failed)." << endl;
+        throw;
+    }
+    return (offset);
+}
+
+void
+LibDHCP::packOptions(isc::util::OutputBuffer& buf,
+                     const Option::OptionCollection& options) {
+    for (Option::OptionCollection::const_iterator it = options.begin();
+         it != options.end();
+         ++it) {
+        it->second->pack4(buf);
+    }
+}
+
+
+bool
+LibDHCP::OptionFactoryRegister(Option::Universe u,
+                               unsigned short opt_type,
+                               Option::Factory * factory) {
+    switch (u) {
+    case Option::V6: {
+        if (v6factories_.find(opt_type)!=v6factories_.end()) {
+            isc_throw(BadValue, "There is already DHCPv6 factory registered "
+                     << "for option type "  << opt_type);
+        }
+        v6factories_[opt_type]=factory;
+        return true;
+    }
+    case Option::V4:
+    default:{
+        isc_throw(BadValue, "This universe type is not supported yet.");
+        return false; // never happens
+    }
+    }
+
+}
diff --git a/src/lib/dhcp/libdhcp.h b/src/lib/dhcp/libdhcp.h
new file mode 100644
index 0000000..468e6bb
--- /dev/null
+++ b/src/lib/dhcp/libdhcp.h
@@ -0,0 +1,103 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef LIBDHCP_H_
+#define LIBDHCP_H_
+
+#include <iostream>
+#include <util/buffer.h>
+#include <dhcp/pkt6.h>
+
+namespace isc {
+namespace dhcp {
+
+class LibDHCP {
+
+public:
+    /// Builds collection of options.
+    ///
+    /// Builds raw (on-wire) data for provided collection of options.
+    ///
+    /// @param buf shared pointer to buffer. Data will be stored there.
+    /// @param buf_len buffer length. Used for buffer overflow protection.
+    /// @param offset Offset from the beginning of the buffer where options will be stored
+    /// @param options collection of options to store to
+    ///
+    /// @return offset to the first unused byte in buffer (next one after last
+    ///         used byte)
+    ///
+    static unsigned int
+    packOptions6(boost::shared_array<uint8_t> buf, unsigned int buf_len,
+                 unsigned int offset,
+                 const isc::dhcp::Option::OptionCollection& options);
+
+
+    /// @brief Stores options in a buffer.
+    ///
+    /// Stores all options defined in options containers in a on-wire
+    /// format in output buffer specified by buf.
+    ///
+    /// May throw different exceptions if option assembly fails. There
+    /// may be different reasons (option too large, option malformed,
+    /// too many options etc.)
+    ///
+    /// @param buf output buffer (assembled options will be stored here)
+    /// @param options collection of options to be assembled
+    static void
+    packOptions(isc::util::OutputBuffer& buf,
+                const isc::dhcp::Option::OptionCollection& options);
+
+    static void
+    unpackOptions4(const std::vector<uint8_t>& buf,
+                   isc::dhcp::Option::OptionCollection& options);
+    ///
+    /// Parses provided buffer and creates Option objects.
+    ///
+    /// Parses provided buf array and stores created Option objects
+    /// in options container.
+    ///
+    /// @param buf Buffer to be parsed.
+    /// @param offset Specifies offset for the first option.
+    /// @param options Reference to option container. Options will be
+    ///        put here.
+    ///
+    /// @return offset to first byte after last parsed option
+    ///
+    static unsigned int
+    unpackOptions6(const boost::shared_array<uint8_t> buf, unsigned int buf_len,
+                   unsigned int offset, unsigned int parse_len,
+                   isc::dhcp::Option::OptionCollection& options_);
+
+    ///
+    /// Registers factory method that produces options of specific option types.
+    ///
+    /// @param u universe of the option (V4 or V6)
+    /// @param opt_type option-type
+    /// @param factory function pointer
+    ///
+    /// @return true, if registration was successful, false otherwise
+    ///
+    static bool
+    OptionFactoryRegister(Option::Universe u,
+                          unsigned short type,
+                          Option::Factory * factory);
+protected:
+    // pointers to factories that produce DHCPv6 options
+    static std::map<unsigned short, Option::Factory*> v6factories_;
+};
+
+}
+}
+
+#endif
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
new file mode 100644
index 0000000..20dd97a
--- /dev/null
+++ b/src/lib/dhcp/option.cc
@@ -0,0 +1,306 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <boost/shared_array.hpp>
+#include "exceptions/exceptions.h"
+#include "util/io_utilities.h"
+
+#include "dhcp/option.h"
+#include "dhcp/libdhcp.h"
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+Option::Option(Universe u, unsigned short type)
+    :universe_(u), type_(type) {
+
+    if ((u == V4) && (type > 255)) {
+        isc_throw(BadValue, "Can't create V4 option of type "
+                  << type << ", V4 options are in range 0..255");
+    }
+}
+
+Option::Option(Universe u, unsigned short type,
+               const boost::shared_array<uint8_t>& buf,
+               unsigned int offset, unsigned int len)
+    :universe_(u), type_(type),
+     offset_(offset)
+{
+    uint8_t* ptr = &buf[offset];
+    data_ = std::vector<uint8_t>(ptr, ptr + len);
+
+    check();
+}
+
+Option::Option(Universe u, unsigned short type, std::vector<uint8_t>& data)
+    :universe_(u), type_(type), data_(data) {
+    check();
+}
+
+Option::Option(Universe u, uint16_t type, vector<uint8_t>::const_iterator first,
+               vector<uint8_t>::const_iterator last)
+    :universe_(u), type_(type), data_(std::vector<uint8_t>(first,last)) {
+    check();
+}
+
+void
+Option::check() {
+    if ( (universe_ != V4) && (universe_ != V6) ) {
+        isc_throw(BadValue, "Invalid universe type specified."
+                  << "Only V4 and V6 are allowed.");
+    }
+
+    if (universe_ == V4) {
+
+        if (type_ > 255) {
+            isc_throw(OutOfRange, "DHCPv4 Option type " << type_ << " is too big."
+                      << "For DHCPv4 allowed type range is 0..255");
+        } else if (data_.size() > 255) {
+            isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big.");
+            /// TODO Larger options can be stored as separate instances
+            /// of DHCPv4 options. Clients MUST concatenate them.
+            /// Fortunately, there are no such large options used today.
+        }
+    }
+
+    // no need to check anything for DHCPv6. It allows full range (0-64k) of
+    // both types and data size.
+}
+
+unsigned int
+Option::pack(boost::shared_array<uint8_t>& buf,
+             unsigned int buf_len,
+             unsigned int offset) {
+    if (universe_ != V6) {
+        isc_throw(BadValue, "Failed to pack " << type_ << " option. Do not "
+                  << "use this method for options other than DHCPv6.");
+    }
+    return pack6(buf, buf_len, offset);
+}
+
+void
+Option::pack4(isc::util::OutputBuffer& buf) {
+    switch (universe_) {
+    case V4: {
+        if (data_.size() > 255) {
+            isc_throw(OutOfRange, "DHCPv4 Option " << type_ << " is too big."
+                      << "At most 255 bytes are supported.");
+            /// TODO Larger options can be stored as separate instances
+            /// of DHCPv4 options. Clients MUST concatenate them.
+            /// Fortunately, there are no such large options used today.
+        }
+
+        buf.writeUint8(type_);
+        buf.writeUint8(len() - getHeaderLen());
+
+        buf.writeData(&data_[0], data_.size());
+
+        LibDHCP::packOptions(buf, options_);
+        return;
+    }
+    case V6:
+        /// TODO: Do we need a sanity check for option size here?
+        buf.writeUint16(type_);
+        buf.writeUint16(len() - getHeaderLen());
+
+        LibDHCP::packOptions(buf, options_);
+        return;
+    default:
+        isc_throw(OutOfRange, "Invalid universe type" << universe_);
+    }
+}
+
+unsigned int
+Option::pack6(boost::shared_array<uint8_t>& buf,
+             unsigned int buf_len,
+             unsigned int offset) {
+    if (offset+len() > buf_len) {
+        isc_throw(OutOfRange, "Failed to pack v6 option=" <<
+                  type_ << ",len=" << len() << ": too small buffer.");
+    }
+
+    uint8_t* ptr = &buf[offset];
+
+    ptr = writeUint16(type_, ptr);
+
+    ptr = writeUint16(len() - getHeaderLen(), ptr);
+
+    if (! data_.empty())
+        memcpy(ptr, &data_[0], data_.size());
+
+    // end of fixed part of this option
+    offset += OPTION6_HDR_LEN + data_.size();
+
+    return LibDHCP::packOptions6(buf, buf_len, offset, options_);
+}
+
+unsigned int
+Option::unpack(const boost::shared_array<uint8_t>& buf,
+               unsigned int buf_len,
+               unsigned int offset,
+               unsigned int parse_len) {
+    switch (universe_) {
+    case V4:
+        return unpack4(buf, buf_len, offset, parse_len);
+    case V6:
+        return unpack6(buf, buf_len, offset, parse_len);
+    default:
+        isc_throw(BadValue, "Unknown universe defined for Option " << type_);
+    }
+
+    return 0; // should not happen
+}
+
+unsigned int
+Option::unpack4(const boost::shared_array<uint8_t>&,
+                unsigned int ,
+                unsigned int ,
+                unsigned int ) {
+    isc_throw(Unexpected, "IPv4 support not implemented yet.");
+    return 0;
+}
+
+unsigned int
+Option::unpack6(const boost::shared_array<uint8_t>& buf,
+                unsigned int buf_len,
+                unsigned int offset,
+                unsigned int parse_len) {
+
+    if (buf_len < offset+parse_len) {
+        isc_throw(OutOfRange, "Failed to unpack DHCPv6 option len="
+                  << parse_len << " offset=" << offset
+                  << " from buffer (length=" << buf_len
+                  << "): too small buffer.");
+    }
+
+    uint8_t* ptr = &buf[offset];
+    data_ = std::vector<uint8_t>(ptr, ptr + parse_len);
+
+    offset_ = offset;
+
+    return (offset+parse_len);
+
+    //return LibDHCP::unpackOptions6(buf, buf_len, offset, parse_len,
+    //                               options_);
+}
+
+/// Returns length of the complete option (data length + DHCPv4/DHCPv6
+/// option header)
+uint16_t
+Option::len() {
+
+    // length of the whole option is header and data stored in this option...
+    int length = getHeaderLen() + data_.size();
+
+    // ... and sum of lengths of all suboptions
+    for (Option::OptionCollection::iterator it = options_.begin();
+         it != options_.end();
+         ++it) {
+        length += (*it).second->len();
+    }
+
+    // note that this is not equal to the length field. This value denotes
+    // the number of bytes required to store this option. The length field
+    // should contain the (len()-getHeaderLen()) value.
+    return (length);
+}
+
+bool
+Option::valid() {
+    if (universe_ != V4 &&
+        universe_ != V6) {
+        return (false);
+    }
+
+    return (true);
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Option::getOption(unsigned short opt_type) {
+    isc::dhcp::Option::OptionCollection::const_iterator x =
+        options_.find(opt_type);
+    if ( x != options_.end() ) {
+        return (*x).second;
+    }
+    return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+bool
+Option::delOption(unsigned short opt_type) {
+    isc::dhcp::Option::OptionCollection::iterator x = options_.find(opt_type);
+    if ( x != options_.end() ) {
+        options_.erase(x);
+        return true; // delete successful
+    }
+    return (false); // option not found, can't delete
+}
+
+
+std::string Option::toText(int indent /* =0 */ ) {
+    std::stringstream tmp;
+
+    for (int i = 0; i < indent; i++)
+        tmp << " ";
+
+    tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ": ";
+
+    for (unsigned int i = 0; i < data_.size(); i++) {
+        if (i) {
+            tmp << ":";
+        }
+        tmp << setfill('0') << setw(2) << hex
+            << static_cast<unsigned short>(data_[i]);
+    }
+
+    // print suboptions
+    for (OptionCollection::const_iterator opt = options_.begin();
+         opt != options_.end();
+         ++opt) {
+        tmp << (*opt).second->toText(indent+2);
+    }
+    return tmp.str();
+}
+
+uint16_t
+Option::getHeaderLen() {
+    switch (universe_) {
+    case V4:
+        return OPTION4_HDR_LEN; // header length for v4
+    case V6:
+        return OPTION6_HDR_LEN; // header length for v6
+    }
+    return 0; // should not happen
+}
+
+void
+Option::addOption(boost::shared_ptr<Option> opt) {
+    if (universe_ == V4) {
+        // check for uniqueness (DHCPv4 options must be unique)
+        if (getOption(opt->getType())) {
+            isc_throw(BadValue, "Option " << opt->getType()
+                      << " already present in this message.");
+        }
+    }
+    options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+Option::~Option() {
+
+}
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
new file mode 100644
index 0000000..088d094
--- /dev/null
+++ b/src/lib/dhcp/option.h
@@ -0,0 +1,316 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION_H_
+#define OPTION_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dhcp {
+
+class Option {
+public:
+    /// length of the usual DHCPv4 option header (there are exceptions)
+    const static size_t OPTION4_HDR_LEN = 2;
+
+    /// length of any DHCPv6 option header
+    const static size_t OPTION6_HDR_LEN = 4;
+
+    /// defines option universe DHCPv4 or DHCPv6
+    enum Universe { V4, V6 };
+
+    /// a collection of DHCPv6 options
+    typedef std::multimap<unsigned int, boost::shared_ptr<Option> >
+    OptionCollection;
+
+    /// @brief a factory function prototype
+    ///
+    /// @param u option universe (DHCPv4 or DHCPv6)
+    /// @param type option type
+    /// @param buf pointer to a buffer
+    /// @param offset offset to first data byte in that buffer
+    /// @param len data length of this option
+    ///
+    /// @return a pointer to a created option object
+    typedef boost::shared_ptr<Option> Factory(Option::Universe u,
+                                              unsigned short type,
+                                              boost::shared_array<uint8_t>& buf,
+                                              unsigned int offset,
+                                              unsigned int len);
+
+    /// @brief ctor, used for options constructed, usually during transmission
+    ///
+    /// @param u option universe (DHCPv4 or DHCPv6)
+    /// @param type option type
+    Option(Universe u, unsigned short type);
+
+    /// @brief ctor, used for received options
+    ///
+    /// boost::shared_array allows sharing a buffer, but it requires that
+    /// different instances share pointer to the whole array, not point
+    /// to different elements in shared array. Therefore we need to share
+    /// pointer to the whole array and remember offset where data for
+    /// this option begins
+    ///
+    /// @param u specifies universe (V4 or V6)
+    /// @param type option type
+    /// @param buf pointer to a buffer
+    /// @param offset offset in a buffer pointing to first byte of data
+    /// @param len length of the option data
+    Option(Universe u, unsigned short type,
+           const boost::shared_array<uint8_t>& buf, unsigned int offset,
+           unsigned int len);
+
+    /// @brief Constructor, used for received options.
+    ///
+    /// This constructor takes vector<uint8_t>& which is used in cases
+    /// when content of the option will be copied and stored within
+    /// option object. V4 Options follow that approach already.
+    /// TODO Migrate V6 options to that approach.
+    ///
+    /// @param u specifies universe (V4 or V6)
+    /// @param type option type (0-255 for V4 and 0-65535 for V6)
+    /// @param data content of the option
+    Option(Universe u, unsigned short type, std::vector<uint8_t>& data);
+
+    /// @brief Constructor, used for received options.
+    ///
+    /// This constructor is similar to the previous one, but it does not take
+    /// the whole vector<uint8_t>, but rather subset of it.
+    ///
+    /// TODO: This can be templated to use different containers, not just
+    /// vector. Prototype should look like this:
+    /// template<typename InputIterator> Option(Universe u, uint16_t type,
+    /// InputIterator first, InputIterator last);
+    ///
+    /// vector<int8_t> myData;
+    /// Example usage: new Option(V4, 123, myData.begin()+1, myData.end()-1)
+    /// This will create DHCPv4 option of type 123 that contains data from
+    /// trimmed (first and last byte removed) myData vector.
+    ///
+    /// @param u specifies universe (V4 or V6)
+    /// @param type option type (0-255 for V4 and 0-65535 for V6)
+    /// @param first iterator to the first element that should be copied
+    /// @param last iterator to the next element after the last one
+    ///        to be copied.
+    Option(Universe u, uint16_t type,
+           std::vector<uint8_t>::const_iterator first,
+           std::vector<uint8_t>::const_iterator last);
+
+    /// @brief returns option universe (V4 or V6)
+    ///
+    /// @return universe type
+    Universe
+    getUniverse() { return universe_; };
+
+    /// @brief Writes option in wire-format to a buffer.
+    ///
+    /// Writes option in wire-format to buffer, returns pointer to first unused
+    /// byte after stored option (that is useful for writing options one after
+    /// another). Used in DHCPv6 options.
+    ///
+    /// TODO: Migrate DHCPv6 code to pack(OutputBuffer& buf) version
+    ///
+    /// @param buf pointer to a buffer
+    /// @param buf_len length of the buffer
+    /// @param offset offset to place, where option should be stored
+    ///
+    /// @return offset to first unused byte after stored option
+    ///
+    virtual unsigned int
+    pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+         unsigned int offset);
+
+    /// @brief Writes option in a wire-format to a buffer.
+    ///
+    /// Method will throw if option storing fails for some reason.
+    ///
+    /// TODO Once old (DHCPv6) implementation is rewritten,
+    /// unify pack4() and pack6() and rename them to just pack().
+    ///
+    /// @param buf output buffer (option will be stored there)
+    virtual void
+    pack4(isc::util::OutputBuffer& buf);
+
+
+    /// @brief Parses buffer.
+    ///
+    /// Parses received buffer, returns offset to the first unused byte after
+    /// parsed option.
+    ///
+    /// @param buf pointer to buffer
+    /// @param buf_len length of buf
+    /// @param offset offset, where start parsing option
+    /// @param parse_len how many bytes should be parsed
+    ///
+    /// @return offset after last parsed octet
+    virtual unsigned int
+    unpack(const boost::shared_array<uint8_t>& buf,
+           unsigned int buf_len,
+           unsigned int offset,
+           unsigned int parse_len);
+
+    /// Returns string representation of the option.
+    ///
+    /// @param indent number of spaces before printing text
+    ///
+    /// @return string with text representation.
+    virtual std::string
+    toText(int indent = 0);
+
+    /// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
+    ///
+    /// @return option type
+    unsigned short getType() { return (type_); }
+
+    /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+    /// option header)
+    ///
+    /// @return length of the option
+    virtual uint16_t
+    len();
+
+    /// @brief Returns length of header (2 for v4, 4 for v6)
+    ///
+    /// @return length of option header
+    virtual uint16_t
+    getHeaderLen();
+
+    /// returns if option is valid (e.g. option may be truncated)
+    ///
+    /// @return true, if option is valid
+    virtual bool
+    valid();
+
+    /// Returns reference to the actual data.
+    ///
+    /// @return reference to actual data (or reference to an empty vector
+    ///         if there is no data)
+    virtual const std::vector<uint8_t>& getData() { return (data_); }
+
+    /// Adds a sub-option.
+    ///
+    /// Some DHCPv6 options can have suboptions. This method allows adding
+    /// options within options.
+    ///
+    /// Note: option is passed by value. That is very convenient as it allows
+    /// downcasting from any derived classes, e.g. shared_ptr<Option6_IA> type
+    /// can be passed directly, without any casts. That would not be possible
+    /// with passing by reference. addOption() is expected to be used in
+    /// many places. Requiring casting is not feasible.
+    ///
+    /// @param opt shared pointer to a suboption that is going to be added.
+    void
+    addOption(boost::shared_ptr<Option> opt);
+
+    /// Returns shared_ptr to suboption of specific type
+    ///
+    /// @param type type of requested suboption
+    ///
+    /// @return shared_ptr to requested suboption
+    boost::shared_ptr<isc::dhcp::Option>
+    getOption(unsigned short type);
+
+    /// Attempts to delete first suboption of requested type
+    ///
+    /// @param type Type of option to be deleted.
+    ///
+    /// @return true if option was deleted, false if no such option existed
+    bool
+    delOption(unsigned short type);
+
+    /// just to force that every option has virtual dtor
+    virtual
+    ~Option();
+
+protected:
+    /// Builds raw (over-wire) buffer of this option, including all
+    /// defined suboptions. Version for building DHCPv4 options.
+    ///
+    /// @param buf output buffer (built options will be stored here)
+    /// @param buf_len buffer length (used for buffer overflow checks)
+    /// @param offset offset from start of the buf buffer
+    ///
+    /// @return offset to the next byte after last used byte
+    virtual unsigned int
+    pack6(boost::shared_array<uint8_t>& buf,
+          unsigned int buf_len,
+          unsigned int offset);
+
+    /// Parses provided buffer and creates DHCPv4 options.
+    ///
+    /// @param buf buffer that contains raw buffer to parse (on-wire format)
+    /// @param buf_len buffer length (used for buffer overflow checks)
+    /// @param offset offset from start of the buf buffer
+    ///
+    /// @return offset to the next byte after last parsed byte
+    virtual unsigned int
+    unpack4(const boost::shared_array<uint8_t>& buf,
+            unsigned int buf_len,
+            unsigned int offset,
+            unsigned int parse_len);
+
+    /// Parses provided buffer and creates DHCPv6 options.
+    ///
+    /// @param buf buffer that contains raw buffer to parse (on-wire format)
+    /// @param buf_len buffer length (used for buffer overflow checks)
+    /// @param offset offset from start of the buf buffer
+    ///
+    /// @return offset to the next byte after last parsed byte
+    virtual unsigned int
+    unpack6(const boost::shared_array<uint8_t>& buf,
+            unsigned int buf_len,
+            unsigned int offset,
+            unsigned int parse_len);
+
+    /// @brief A private method used for option correctness.
+    ///
+    /// It is used in constructors. If there are any problems detected
+    /// (like specifying type > 255 for DHCPv4 option), it will throw
+    /// BadValue or OutOfRange exceptions.
+    void check();
+
+    /// option universe (V4 or V6)
+    Universe universe_;
+
+    /// option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
+    unsigned short type_;
+
+    /// contains content of this data
+    std::vector<uint8_t> data_;
+
+    /// TODO: Remove this field. vector<uint8_t> should be used
+    /// instead.
+    /// data is a shared_pointer that points out to the
+    /// whole packet. offset_ specifies where data for
+    /// this option begins.
+    unsigned int offset_;
+
+    /// collection for storing suboptions
+    OptionCollection options_;
+
+    /// TODO: probably 2 different containers have to be used for v4 (unique
+    /// options) and v6 (options with the same type can repeat)
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
new file mode 100644
index 0000000..88eb915
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io_utilities.h>
+#include <dhcp/option4_addrlst.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace isc::asiolink;
+
+Option4AddrLst::Option4AddrLst(uint8_t type)
+    :Option(V4, type) {
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const AddressContainer& addrs)
+    :Option(V4, type) {
+    setAddresses(addrs);
+    // don't set addrs_ directly. setAddresses() will do additional checks.
+}
+
+
+Option4AddrLst::Option4AddrLst(uint8_t type,
+                               vector<uint8_t>::const_iterator first,
+                               vector<uint8_t>::const_iterator last)
+    :Option(V4, type) {
+    if ( (distance(first, last) % V4ADDRESS_LEN) ) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_
+                  << " has invalid length=" << distance(first, last)
+                  << ", must be divisible by 4.");
+    }
+
+    while (first != last) {
+        const uint8_t* ptr = &(*first);
+        addAddress(IOAddress(readUint32(ptr)));
+        first += V4ADDRESS_LEN;
+    }
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
+    :Option(V4, type) {
+    setAddress(addr);
+}
+
+void
+Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+
+    if (addrs_.size() * V4ADDRESS_LEN > 255) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big."
+                  << "At most 255 bytes are supported.");
+        /// TODO Larger options can be stored as separate instances
+        /// of DHCPv4 options. Clients MUST concatenate them.
+        /// Fortunately, there are no such large options used today.
+    }
+
+    buf.writeUint8(type_);
+    buf.writeUint8(len() - getHeaderLen());
+
+    AddressContainer::const_iterator addr = addrs_.begin();
+
+    while (addr != addrs_.end()) {
+        buf.writeUint32(*addr);
+        ++addr;
+    }
+}
+
+void Option4AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.clear();
+    addAddress(addr);
+}
+
+void Option4AddrLst::setAddresses(const AddressContainer& addrs) {
+
+    // Do not copy it as a whole. addAddress() does sanity checks.
+    // i.e. throw if someone tries to set IPv6 address.
+    addrs_.clear();
+    for (AddressContainer::const_iterator addr = addrs.begin();
+         addr != addrs.end(); ++addr) {
+        addAddress(*addr);
+    }
+}
+
+
+void Option4AddrLst::addAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.push_back(addr);
+}
+
+uint16_t Option4AddrLst::len() {
+
+    // Returns length of the complete option (option header + data length)
+    return (getHeaderLen() + addrs_.size() * V4ADDRESS_LEN);
+}
+
+std::string Option4AddrLst::toText(int indent /* =0 */ ) {
+    std::stringstream tmp;
+
+    for (int i = 0; i < indent; i++) {
+        tmp << " ";
+    }
+
+    tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ":";
+
+    for (AddressContainer::const_iterator addr = addrs_.begin();
+         addr != addrs_.end(); ++addr) {
+        tmp << " " << (*addr);
+    }
+
+    return tmp.str();
+}
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
new file mode 100644
index 0000000..c795805
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -0,0 +1,167 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION4_ADDRLST_H_
+#define OPTION4_ADDRLST_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+
+/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
+///
+/// This class handles a list of IPv4 addresses. An example of such option
+/// is dns-servers option. It can also be used to handle a single address.
+class Option4AddrLst : public isc::dhcp::Option {
+public:
+
+    /// Defines a collection of IPv4 addresses.
+    typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+    /// @brief Constructor, creates an option with empty list of addresses.
+    ///
+    /// Creates empty option that can hold addresses. Addresses can be added
+    /// with addAddress(), setAddress() or setAddresses().
+    ///
+    /// @param type option type
+    Option4AddrLst(uint8_t type);
+
+    /// @brief Constructor, creates an option with a list of addresses.
+    ///
+    /// Creates an option that contains specified list of IPv4 addresses.
+    ///
+    /// @param type option type
+    /// @param addrs container with a list of addresses
+    Option4AddrLst(uint8_t type, const AddressContainer& addrs);
+
+    /// @brief Constructor, creates an option with a single address.
+    ///
+    /// Creates an option that contains a single address.
+    ///
+    /// @param type option type
+    /// @param addr a single address that will be stored as 1-elem. address list
+    Option4AddrLst(uint8_t type, const isc::asiolink::IOAddress& addr);
+
+    /// @brief Constructor, used for received options.
+    ///
+    /// TODO: This can be templated to use different containers, not just
+    /// vector. Prototype should look like this:
+    /// template<typename InputIterator> Option(Universe u, uint16_t type,
+    /// InputIterator first, InputIterator last);
+    ///
+    /// vector<int8_t> myData;
+    /// Example usage: new Option(V4, 123, myData.begin()+1, myData.end()-1)
+    /// This will create DHCPv4 option of type 123 that contains data from
+    /// trimmed (first and last byte removed) myData vector.
+    ///
+    /// @param type option type (0-255, as DHCPv4 option types are 1 octet)
+    /// @param first iterator to the first element that should be copied
+    /// @param last iterator to the next element after the last one
+    ///        to be copied.
+    Option4AddrLst(uint8_t type, std::vector<uint8_t>::const_iterator first,
+           std::vector<uint8_t>::const_iterator last);
+
+    /// @brief Writes option in a wire-format to a buffer.
+    ///
+    /// Method will throw if option storing fails for some reason.
+    ///
+    /// TODO Once old (DHCPv6) implementation is rewritten,
+    /// unify pack4() and pack6() and rename them to just pack().
+    ///
+    /// @param buf output buffer (option will be stored there)
+    virtual void
+    pack4(isc::util::OutputBuffer& buf);
+
+    /// Returns string representation of the option.
+    ///
+    /// @param indent number of spaces before printing text
+    ///
+    /// @return string with text representation.
+    virtual std::string
+    toText(int indent = 0);
+
+    /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+    /// option header)
+    ///
+    /// @return length of the option
+    virtual uint16_t len();
+
+    /// @brief Returns vector with addresses.
+    ///
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
+    ///
+    /// @return address container with addresses
+    AddressContainer
+    getAddresses() { return addrs_; };
+
+    /// @brief Sets addresses list.
+    ///
+    /// Clears the existing list of addresses and replaces it with the
+    /// provided collection. See addAddress() if you want to add an
+    /// address to the existing list or setAddress() if you want to
+    /// set a single address only.
+    ///
+    /// Every passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addrs address collection to be set
+    void setAddresses(const AddressContainer& addrs);
+
+    /// @brief Clears address list and sets a single address.
+    ///
+    /// Clears existing list of addresses and adds a single address to that
+    /// list. This is a very convenient method for options that are supposed
+    /// to hold only a single address. See addAddress() if you want to add
+    /// an address to the existing list or setAddresses() if you want to
+    /// set the whole list at once.
+    ///
+    /// Passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be set as 1-element address list
+    void setAddress(const isc::asiolink::IOAddress& addr);
+
+    /// @brief Adds address to existing list of addresses.
+    ///
+    /// Adds a single address to that list. See setAddress() if you want to
+    /// define only a single address or setAddresses() if you want to
+    /// set the whole list at once.
+    ///
+    /// Passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be added to the existing list
+    void addAddress(const isc::asiolink::IOAddress& addr);
+
+protected:
+    /// contains list of addresses
+    AddressContainer addrs_;
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
new file mode 100644
index 0000000..9be3810
--- /dev/null
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -0,0 +1,138 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "asiolink/io_address.h"
+#include "util/io_utilities.h"
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_addrlst.h"
+#include "dhcp/dhcp6.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+                               const AddressContainer& addrs)
+    :Option(V6, type), addrs_(addrs) {
+}
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+                               const isc::asiolink::IOAddress& addr)
+    :Option(V6, type), addrs_(1,addr) {
+}
+
+Option6AddrLst::Option6AddrLst(unsigned short type,
+                               boost::shared_array<uint8_t> buf,
+                               unsigned int buf_len,
+                               unsigned int offset,
+                               unsigned int option_len)
+    :Option(V6, type) {
+    unpack(buf, buf_len, offset, option_len);
+}
+
+void
+Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET6) {
+        isc_throw(BadValue, "Can't store non-IPv6 address in Option6AddrLst option");
+    }
+
+    addrs_.clear();
+    addrs_.push_back(addr);
+}
+
+void
+Option6AddrLst::setAddresses(const AddressContainer& addrs) {
+    addrs_ = addrs;
+}
+
+unsigned int
+Option6AddrLst::pack(boost::shared_array<uint8_t>& buf,
+                    unsigned int buf_len,
+                    unsigned int offset) {
+    if (len() > buf_len) {
+        isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
+                  << ", buffer=" << buf_len << ": too small buffer.");
+    }
+
+    writeUint16(type_, &buf[offset]);
+    offset += sizeof(uint16_t);
+
+    // len() returns complete option length.
+    // len field contains length without 4-byte option header
+    writeUint16(len() - OPTION6_HDR_LEN, &buf[offset]);
+    offset += sizeof(uint16_t);
+
+    // this wrapping is *ugly*. I wish there was a cleaner way to do this.
+    for (AddressContainer::const_iterator addr=addrs_.begin();
+         addr!=addrs_.end();
+         ++addr) {
+        memcpy(&buf[offset],
+               addr->getAddress().to_v6().to_bytes().data(),
+               V6ADDRESS_LEN);
+        offset += V6ADDRESS_LEN;
+    }
+
+    return offset;
+}
+
+unsigned int
+Option6AddrLst::unpack(const boost::shared_array<uint8_t>& buf,
+                       unsigned int buf_len,
+                       unsigned int offset,
+                       unsigned int option_len) {
+    if (offset+option_len > buf_len) {
+        isc_throw(OutOfRange, "Option " << type_
+                  << " truncated.");
+    }
+
+    if (option_len%16) {
+        isc_throw(OutOfRange, "Option " << type_
+                  << " malformed: len=" << option_len
+                  << " is not divisible by 16.");
+    }
+    while (option_len > 0) {
+        addrs_.push_back(IOAddress::from_bytes(AF_INET6, &buf[offset]));
+        offset += 16;
+        option_len -= 16;
+    }
+
+    return offset;
+}
+
+std::string Option6AddrLst::toText(int indent /* =0 */) {
+    stringstream tmp;
+    for (int i=0; i<indent; i++)
+        tmp << " ";
+
+    tmp << "type=" << type_ << " " << addrs_.size() << "addr(s): ";
+
+    for (AddressContainer::const_iterator addr=addrs_.begin();
+         addr!=addrs_.end();
+         ++addr) {
+        tmp << addr->toText() << " ";
+    }
+    return tmp.str();
+}
+
+uint16_t Option6AddrLst::len() {
+
+    return (OPTION6_HDR_LEN + addrs_.size()*16);
+}
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
new file mode 100644
index 0000000..a73dc55
--- /dev/null
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -0,0 +1,126 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION6_ADDRLST_H_
+#define OPTION6_ADDRLST_H_
+
+#include <vector>
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+/// @brief DHCPv6 Option class for handling list of IPv6 addresses.
+///
+/// This class handles a list of IPv6 addresses. An example of such option
+/// is dns-servers option. It can also be used to handle single address.
+class Option6AddrLst: public Option {
+
+public:
+    /// a container for (IPv6) addresses
+    typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+    /// @brief Constructor used during option generation.
+    ///
+    /// @param type option type
+    /// @param addrs vector of addresses to be stored
+    ///
+    Option6AddrLst(unsigned short type,
+                   const AddressContainer& addrs);
+
+    /// @brief Simplified constructor for a single address
+    ///
+    /// @param type option type
+    /// @param addr a single address to be stored
+    ///
+    Option6AddrLst(unsigned short type,
+                   const isc::asiolink::IOAddress& addr);
+
+    /// @brief Constructor used for parsing received option
+    ///
+    /// @param type option type
+    /// @param buf pointer to packet buffer
+    /// @param buf_len length of packet buffer
+    /// @param offset offset to beginning of option data
+    /// @param len length of option data
+    ///
+    Option6AddrLst(unsigned short type, boost::shared_array<uint8_t> buf,
+                   unsigned int buf_len,
+                   unsigned int offset,
+                   unsigned int len);
+
+    /// @brief Assembles on-wire form of this option
+    ///
+    /// @param buf pointer to packet buffer
+    /// @param buf_len length of packet buffer
+    /// @param offset offset to place, where option is to be stored
+    ///
+    /// @return offset to the next unused char (just after stored option)
+    ///
+    unsigned int
+    pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+         unsigned int offset);
+
+    /// @brief Parses received data
+    ///
+    /// @param buf pointer to packet buffer
+    /// @param buf_len length of packet buffer
+    /// @param offset offset to option data
+    /// @param parse_len specified option data length
+    ///
+    /// @return offset to the next unparsed char (just after parsed option)
+    ///
+    virtual unsigned int
+    unpack(const boost::shared_array<uint8_t>& buf,
+           unsigned int buf_len,
+           unsigned int offset,
+           unsigned int parse_len);
+
+    virtual std::string toText(int indent = 0);
+
+    /// @brief Sets a single address.
+    ///
+    /// @param addr a single address to be added
+    ///
+    void setAddress(const isc::asiolink::IOAddress& addr);
+
+    /// @brief Sets list of addresses.
+    ///
+    /// @param addrs a vector of addresses to be added
+    ///
+    void setAddresses(const AddressContainer& addrs);
+
+    /// @brief Returns vector with addresses.
+    ///
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
+    ///
+    /// @return address container with addresses
+    AddressContainer
+    getAddresses() { return addrs_; };
+
+    // returns data length (data length + DHCPv4/DHCPv6 option header)
+    virtual uint16_t len();
+
+protected:
+    AddressContainer addrs_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION_ADDRLST_H_ */
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
new file mode 100644
index 0000000..209f500
--- /dev/null
+++ b/src/lib/dhcp/option6_ia.cc
@@ -0,0 +1,136 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/dhcp6.h"
+#include "util/io_utilities.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+// Constructs an IA option (IA_NA/IA_PD) for transmission.
+// NOTE(review): t1_ and t2_ are left uninitialized here — callers must
+// set them via setT1()/setT2() before pack() is invoked.
+Option6IA::Option6IA(unsigned short type, unsigned int iaid)
+    :Option(Option::V6, type), iaid_(iaid) {
+}
+
+// Constructs an IA option from received wire data by delegating to
+// unpack(); throws OutOfRange if the data is truncated.  The virtual
+// unpack() call resolves to Option6IA::unpack() during construction.
+Option6IA::Option6IA(unsigned short type,
+                     const boost::shared_array<uint8_t>& buf,
+                     unsigned int buf_len,
+                     unsigned int offset,
+                     unsigned int option_len)
+    :Option(Option::V6, type) {
+    unpack(buf, buf_len, offset, option_len);
+}
+
+// Stores this IA option (header, IAID/T1/T2 payload, then suboptions)
+// in wire format at buf[offset].  Returns offset to first unused byte.
+unsigned int
+Option6IA::pack(boost::shared_array<uint8_t>& buf,
+                unsigned int buf_len,
+                unsigned int offset) {
+    // The whole option (header + payload + suboptions) must fit between
+    // offset and the end of the buffer.
+    if (offset + len() > buf_len) {
+        isc_throw(OutOfRange, "Failed to pack IA option: len=" << len()
+                  << ", buffer=" << buf_len << ": too small buffer.");
+    }
+
+    // Sanity check: a valid IA is at least header (4) + fixed body (12).
+    if (len() < 16 ) {
+        isc_throw(OutOfRange, "Attempt to build malformed IA option: len="
+                  << len() << " is too small (at least 16 is required).");
+    }
+
+    uint8_t* ptr = &buf[offset];
+
+    // Option header: type, then length (length excludes the 4-byte header).
+    ptr = writeUint16(type_, ptr);
+    ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
+    offset += OPTION6_HDR_LEN;
+
+    // Fixed IA payload: IAID, T1, T2 (4 bytes each).
+    ptr = writeUint32(iaid_, ptr);
+    ptr = writeUint32(t1_, ptr);
+    ptr = writeUint32(t2_, ptr);
+    offset += OPTION6_IA_LEN;
+
+    // Append any suboptions (e.g. IAADDR) after the fixed payload.
+    offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
+    return offset;
+}
+
+// Parses IAID/T1/T2 and any suboptions from buf starting at offset.
+// Returns offset to the first byte after the parsed data.
+unsigned int
+Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
+                  unsigned int buf_len,
+                  unsigned int offset,
+                  unsigned int parse_len) {
+    // Both the declared option length and the buffer itself must be able
+    // to hold the 12-byte fixed IA payload.
+    if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
+        isc_throw(OutOfRange, "Option " << type_ << " truncated");
+    }
+
+    iaid_ = readUint32(&buf[offset]);
+    offset += sizeof(uint32_t);
+
+    t1_ = readUint32(&buf[offset]);
+    offset += sizeof(uint32_t);
+
+    t2_ = readUint32(&buf[offset]);
+    offset += sizeof(uint32_t);
+
+    // Whatever remains of this option's declared length is suboptions.
+    offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
+                                     parse_len - OPTION6_IA_LEN, options_);
+
+    return (offset);
+}
+
+// Returns a human-readable, multi-line description of this IA option
+// and its suboptions, indented by the given number of spaces.
+std::string Option6IA::toText(int indent /* = 0*/) {
+    stringstream tmp;
+
+    for (int i=0; i<indent; i++)
+        tmp << " ";
+    tmp << "type=" << type_;
+
+    // Label the well-known IA container types by name.
+    switch (type_) {
+    case D6O_IA_NA:
+        tmp << "(IA_NA)";
+        break;
+    case D6O_IA_PD:
+        tmp << "(IA_PD)";
+        break;
+    default:
+        tmp << "(unknown)";
+    }
+    tmp << " iaid=" << iaid_ << ", t1=" << t1_ << ", t2=" << t2_
+        << " " << options_.size() << " sub-options:" << endl;
+
+    // Recurse into suboptions with two extra spaces of indentation.
+    for (OptionCollection::const_iterator opt=options_.begin();
+         opt!=options_.end();
+         ++opt) {
+        tmp << (*opt).second->toText(indent+2);
+    }
+    return tmp.str();
+}
+
+// Returns the complete on-wire length of this option: 4-byte option
+// header, 12-byte fixed IA payload, plus the lengths of all suboptions.
+uint16_t Option6IA::len() {
+
+    uint16_t length = OPTION6_HDR_LEN /*header (4)*/ +
+        OPTION6_IA_LEN  /* option content (12) */;
+
+    // length of all suboptions
+    for (Option::OptionCollection::iterator it = options_.begin();
+         it != options_.end();
+         ++it) {
+        length += (*it).second->len();
+    }
+    return (length);
+}
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
new file mode 100644
index 0000000..cab8068
--- /dev/null
+++ b/src/lib/dhcp/option6_ia.h
@@ -0,0 +1,137 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION_IA_H_
+#define OPTION_IA_H_
+
+#include <stdint.h>
+#include "option.h"
+
+namespace isc {
+namespace dhcp {
+
+/// @brief Represents a DHCPv6 Identity Association option (IA_NA/IA_PD).
+///
+/// Holds the IAID, T1 and T2 timers and a collection of suboptions
+/// (typically IAADDR options), as defined in RFC 3315.
+class Option6IA: public Option {
+
+public:
+    /// Length of IA_NA and IA_PD content
+    const static size_t OPTION6_IA_LEN = 12;
+
+    /// @brief ctor, used for options constructed, usually during transmission
+    ///
+    /// NOTE(review): declared here with uint16_t type, but defined in
+    /// option6_ia.cc with "unsigned short" — harmonize the two.
+    ///
+    /// @param type option type (usually 4 for IA_NA, 25 for IA_PD)
+    /// @param iaid identity association identifier (id of IA)
+    Option6IA(uint16_t type, unsigned int iaid);
+
+    /// @brief ctor, used for received options
+    ///
+    /// boost::shared_array allows sharing a buffer, but it requires that
+    /// different instances share pointer to the whole array, not point
+    /// to different elements in shared array. Therefore we need to share
+    /// pointer to the whole array and remember offset where data for
+    /// this option begins
+    ///
+    /// @param type option type (usually 4 for IA_NA, 25 for IA_PD)
+    /// @param buf buffer to be parsed
+    /// @param buf_len buffer length
+    /// @param offset offset in buffer
+    /// @param len number of bytes to parse
+    Option6IA(uint16_t type, const boost::shared_array<uint8_t>& buf,
+              unsigned int buf_len, unsigned int offset, unsigned int len);
+
+    /// Writes option in wire-format to buf, returns pointer to first unused
+    /// byte after stored option.
+    ///
+    /// @param buf buffer (option will be stored here)
+    /// @param buf_len (buffer length)
+    /// @param offset offset place where option should be stored
+    ///
+    /// @return offset to the first unused byte after stored option
+    unsigned int
+    pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+         unsigned int offset);
+
+    /// @brief Parses received buffer
+    ///
+    /// Parses received buffer and returns offset to the first unused byte after
+    /// parsed option.
+    ///
+    /// @param buf pointer to buffer
+    /// @param buf_len length of buf
+    /// @param offset offset, where start parsing option
+    /// @param parse_len how many bytes should be parsed
+    ///
+    /// @return offset after last parsed octet
+    virtual unsigned int
+    unpack(const boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+           unsigned int offset, unsigned int parse_len);
+
+    /// Provides human readable text representation
+    ///
+    /// @param indent number of leading space characters
+    ///
+    /// @return string with text representation
+    virtual std::string
+    toText(int indent = 0);
+
+    /// Sets T1 timer.
+    ///
+    /// @param t1 t1 value to be set
+    void setT1(unsigned int t1) { t1_=t1; }
+
+
+    /// Sets T2 timer.
+    ///
+    /// @param t2 t2 value to be set
+    void setT2(unsigned int t2) { t2_=t2; }
+
+    /// Returns IA identifier.
+    ///
+    /// @return IAID value.
+    ///
+    unsigned int getIAID() const { return iaid_; }
+
+    /// Returns T1 timer.
+    ///
+    /// @return T1 value.
+    unsigned int getT1() const { return t1_; }
+
+    /// Returns T2 timer.
+    ///
+    /// @return T2 value.
+    unsigned int getT2() const { return t2_; }
+
+    /// @brief returns complete length of option
+    ///
+    /// Returns length of this option, including option header and suboptions
+    ///
+    /// @return length of this option
+    virtual uint16_t
+    len();
+
+protected:
+
+    /// keeps IA identifier
+    unsigned int iaid_;
+
+    /// keeps T1 timer value
+    /// NOTE(review): uninitialized until setT1() or unpack() is called.
+    unsigned int t1_;
+
+    /// keeps T2 timer value
+    /// NOTE(review): uninitialized until setT2() or unpack() is called.
+    unsigned int t2_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION_IA_H_ */
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
new file mode 100644
index 0000000..fd3bca4
--- /dev/null
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -0,0 +1,132 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include "exceptions/exceptions.h"
+
+#include "dhcp/libdhcp.h"
+#include "dhcp/option6_iaaddr.h"
+#include "dhcp/dhcp6.h"
+#include "asiolink/io_address.h"
+#include "util/io_utilities.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+// Constructs an IAADDR option for transmission with the given address
+// and preferred/valid lifetimes (in seconds).
+Option6IAAddr::Option6IAAddr(unsigned short type,
+                             const isc::asiolink::IOAddress& addr,
+                             unsigned int pref, unsigned int valid)
+    :Option(V6, type), addr_(addr), preferred_(pref),
+     valid_(valid) {
+}
+
+// Constructs an IAADDR option from received wire data via unpack();
+// addr_ is first set to "::" because IOAddress has no default ctor.
+// NOTE(review): buf is taken by value here while Option6IA's buffer
+// ctor takes a const reference — consider aligning the two.
+Option6IAAddr::Option6IAAddr(unsigned short type,
+                             boost::shared_array<uint8_t> buf,
+                             unsigned int buf_len, unsigned int offset,
+                             unsigned int option_len)
+    :Option(V6, type), addr_("::") {
+    unpack(buf, buf_len, offset, option_len);
+}
+
+// Stores this IAADDR option (header, 16-byte address, preferred/valid
+// lifetimes, then suboptions) in wire format at buf[offset].
+//
+// @param buf output buffer
+// @param buf_len total length of buf
+// @param offset offset where the option is to be stored
+//
+// @return offset to the first unused byte after the stored option
+unsigned int
+Option6IAAddr::pack(boost::shared_array<uint8_t>& buf,
+                    unsigned int buf_len,
+                    unsigned int offset) {
+    // Account for the write position: the option must fit between offset
+    // and the end of the buffer.  (The original check compared len()
+    // against buf_len only, ignoring offset, which could overrun the
+    // buffer; Option6IA::pack already uses offset + len().)
+    if (offset + len() > buf_len) {
+        isc_throw(OutOfRange, "Failed to pack IAADDR option: len=" << len()
+                  << ", buffer=" << buf_len << ": too small buffer.");
+    }
+
+    uint8_t* ptr = &buf[offset];
+
+    ptr = writeUint16(type_, ptr);
+
+    // len() returns complete option length. len field contains
+    // length without 4-byte option header
+    ptr = writeUint16(len() - OPTION6_HDR_LEN, ptr);
+    offset += OPTION6_HDR_LEN;
+
+    // 16-byte IPv6 address.
+    memcpy(ptr, addr_.getAddress().to_v6().to_bytes().data(), 16);
+    ptr += V6ADDRESS_LEN;
+
+    ptr = writeUint32(preferred_, ptr);
+
+    ptr = writeUint32(valid_, ptr);
+    offset += OPTION6_IAADDR_LEN;
+
+    // pack suboptions (there shouldn't be any for IAADDR)
+    offset = LibDHCP::packOptions6(buf, buf_len, offset, options_);
+    return offset;
+}
+
+// Parses an IAADDR option: 16-byte IPv6 address, preferred and valid
+// lifetimes (4 bytes each), then any suboptions.
+//
+// @param buf pointer to packet buffer
+// @param buf_len length of packet buffer
+// @param offset offset to option data
+// @param parse_len declared option data length
+//
+// @return offset to the first byte after the parsed data
+unsigned int
+Option6IAAddr::unpack(const boost::shared_array<uint8_t>& buf,
+                  unsigned int buf_len,
+                  unsigned int offset,
+                  unsigned int parse_len) {
+    if ( parse_len < OPTION6_IAADDR_LEN || offset + OPTION6_IAADDR_LEN > buf_len) {
+        isc_throw(OutOfRange, "Option " << type_ << " truncated");
+    }
+
+    // 16 bytes: IPv6 address
+    addr_ = IOAddress::from_bytes(AF_INET6, &buf[offset]);
+    offset += V6ADDRESS_LEN;
+
+    preferred_ = readUint32(&buf[offset]);
+    offset += sizeof(uint32_t);
+
+    valid_ = readUint32(&buf[offset]);
+    offset += sizeof(uint32_t);
+
+    // Remaining bytes are suboptions; use the named constant for the
+    // fixed part already consumed rather than the magic number 24.
+    offset = LibDHCP::unpackOptions6(buf, buf_len, offset,
+                                     parse_len - OPTION6_IAADDR_LEN, options_);
+
+    return offset;
+}
+
+// Returns a human-readable, one-line-per-option description of this
+// IAADDR option and any suboptions, indented by the given amount.
+std::string Option6IAAddr::toText(int indent /* =0 */) {
+    stringstream tmp;
+    for (int i=0; i<indent; i++)
+        tmp << " ";
+
+    tmp << "type=" << type_ << "(IAADDR) addr=" << addr_.toText()
+        << ", preferred-lft=" << preferred_  << ", valid-lft="
+        << valid_ << endl;
+
+    // Recurse into suboptions with two extra spaces of indentation.
+    for (OptionCollection::const_iterator opt=options_.begin();
+         opt!=options_.end();
+         ++opt) {
+        tmp << (*opt).second->toText(indent+2);
+    }
+    return tmp.str();
+}
+
+// Returns the complete on-wire length of this option: 4-byte option
+// header, 24-byte fixed IAADDR payload, plus all suboption lengths.
+uint16_t Option6IAAddr::len() {
+
+    uint16_t length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+
+    // length of all suboptions
+    // TODO implement:
+    // protected: unsigned short Option::lenHelper(int header_size);
+    for (Option::OptionCollection::iterator it = options_.begin();
+         it != options_.end();
+         ++it) {
+        length += (*it).second->len();
+    }
+    return (length);
+}
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
new file mode 100644
index 0000000..40e5967
--- /dev/null
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -0,0 +1,145 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION6_IAADDR_H_
+#define OPTION6_IAADDR_H_
+
+#include "asiolink/io_address.h"
+#include "dhcp/option.h"
+
+namespace isc {
+namespace dhcp {
+
+/// @brief Represents a DHCPv6 IA Address (IAADDR) option.
+///
+/// Holds a single IPv6 address together with its preferred and valid
+/// lifetimes, as defined in RFC 3315.
+class Option6IAAddr: public Option {
+
+public:
+    /// length of the fixed part of the IAADDR option
+    static const size_t OPTION6_IAADDR_LEN = 24;
+
+    /// @brief ctor, used for options constructed (during transmission)
+    ///
+    /// @param type option type
+    /// @param addr reference to an address
+    /// @param preferred address preferred lifetime (in seconds)
+    /// @param valid address valid lifetime (in seconds)
+    Option6IAAddr(unsigned short type, const isc::asiolink::IOAddress& addr,
+                  unsigned int preferred, unsigned int valid);
+
+    /// ctor, used for received options
+    /// boost::shared_array allows sharing a buffer, but it requires that
+    /// different instances share pointer to the whole array, not point
+    /// to different elements in shared array. Therefore we need to share
+    /// pointer to the whole array and remember offset where data for
+    /// this option begins
+    ///
+    /// @param type option type
+    /// @param buf pointer to a buffer
+    /// @param buf_len length of the buffer
+    /// @param offset offset to first data byte in that buffer
+    /// @param len data length of this option
+    Option6IAAddr(unsigned short type, boost::shared_array<uint8_t> buf,
+                  unsigned int buf_len, unsigned int offset, unsigned int len);
+
+    /// @brief Writes option in wire-format.
+    ///
+    /// Writes option in wire-format to buf, returns pointer to first unused
+    /// byte after stored option.
+    ///
+    /// @param buf pointer to a buffer
+    /// @param buf_len length of the buffer
+    /// @param offset offset to place, where option should be stored
+    ///
+    /// @return offset to first unused byte after stored option
+    unsigned int
+    pack(boost::shared_array<uint8_t>& buf, unsigned int buf_len,
+         unsigned int offset);
+
+    /// @brief Parses buffer.
+    ///
+    /// Parses received buffer, returns offset to the first unused byte after
+    /// parsed option.
+    ///
+    /// @param buf pointer to buffer
+    /// @param buf_len length of buf
+    /// @param offset offset, where start parsing option
+    /// @param parse_len how many bytes should be parsed
+    ///
+    /// @return offset after last parsed octet
+    virtual unsigned int
+    unpack(const boost::shared_array<uint8_t>& buf,
+           unsigned int buf_len,
+           unsigned int offset,
+           unsigned int parse_len);
+
+    /// Returns string representation of the option.
+    ///
+    /// @param indent number of spaces before printing text
+    ///
+    /// @return string with text representation.
+    virtual std::string
+    toText(int indent = 0);
+
+
+    /// sets address in this option.
+    ///
+    /// @param addr address to be sent in this option
+    void setAddress(const isc::asiolink::IOAddress& addr) { addr_ = addr; }
+
+    /// Sets preferred lifetime (in seconds)
+    ///
+    /// @param pref address preferred lifetime (in seconds)
+    ///
+    void setPreferred(unsigned int pref) { preferred_=pref; }
+
+    /// Sets valid lifetime (in seconds).
+    ///
+    /// @param valid address valid lifetime (in seconds)
+    ///
+    void setValid(unsigned int valid) { valid_=valid; }
+
+    /// Returns  address contained within this option.
+    ///
+    /// @return address
+    isc::asiolink::IOAddress
+    getAddress() const { return addr_; }
+
+    /// Returns preferred lifetime of an address.
+    ///
+    /// @return preferred lifetime (in seconds)
+    unsigned int
+    getPreferred() const { return preferred_; }
+
+    /// Returns valid lifetime of an address.
+    ///
+    /// @return valid lifetime (in seconds)
+    unsigned int
+    getValid() const { return valid_; }
+
+    /// returns data length (data length + DHCPv4/DHCPv6 option header)
+    virtual uint16_t len();
+
+protected:
+    /// contains an IPv6 address
+    isc::asiolink::IOAddress addr_;
+
+    /// contains preferred-lifetime timer (in seconds)
+    unsigned int preferred_;
+
+    /// contains valid-lifetime timer (in seconds)
+    unsigned int valid_;
+};
+
+} // isc::dhcp namespace
+} // isc namespace
+
+#endif /* OPTION6_IAADDR_H_ */
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
new file mode 100644
index 0000000..1f68527
--- /dev/null
+++ b/src/lib/dhcp/pkt4.cc
@@ -0,0 +1,257 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dhcp/pkt4.h>
+#include <dhcp/libdhcp.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <iostream>
+#include <sstream>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace isc {
+namespace dhcp {
+
+const IOAddress DEFAULT_ADDRESS("0.0.0.0");
+
+// Constructor used when building a reply: creates an outgoing packet of
+// the given DHCP message type, deriving the BOOTP op field from it.
+// NOTE: DHCPTypeToBootpType() throws OutOfRange for invalid msg_type.
+Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
+     :local_addr_(DEFAULT_ADDRESS),
+      remote_addr_(DEFAULT_ADDRESS),
+      iface_(""),
+      ifindex_(0),
+      local_port_(DHCP4_SERVER_PORT),
+      remote_port_(DHCP4_CLIENT_PORT),
+      op_(DHCPTypeToBootpType(msg_type)),
+      htype_(HTYPE_ETHER),
+      hlen_(0),
+      hops_(0),
+      transid_(transid),
+      secs_(0),
+      flags_(0),
+      ciaddr_(DEFAULT_ADDRESS),
+      yiaddr_(DEFAULT_ADDRESS),
+      siaddr_(DEFAULT_ADDRESS),
+      giaddr_(DEFAULT_ADDRESS),
+      bufferIn_(NULL, 0), // not used, this is TX packet
+      bufferOut_(DHCPV4_PKT_HDR_LEN),
+      msg_type_(msg_type)
+{
+    // Fixed-size header buffers start zeroed; setters fill them in later.
+    memset(chaddr_, 0, MAX_CHADDR_LEN);
+    memset(sname_, 0, MAX_SNAME_LEN);
+    memset(file_, 0, MAX_FILE_LEN);
+}
+
+// Constructor used on packet reception: copies the received wire data
+// into bufferIn_; actual parsing happens later in unpack().
+//
+// @param data pointer to received data
+// @param len length of received data
+//
+// @throw OutOfRange if data is shorter than the DHCPv4 fixed header
+Pkt4::Pkt4(const uint8_t* data, size_t len)
+     :local_addr_(DEFAULT_ADDRESS),
+      remote_addr_(DEFAULT_ADDRESS),
+      iface_(""),
+      ifindex_(-1),
+      local_port_(DHCP4_SERVER_PORT),
+      remote_port_(DHCP4_CLIENT_PORT),
+      op_(BOOTREQUEST),
+      htype_(HTYPE_ETHER), // fixed-header fields get the same defaults as
+      hlen_(0),            // the TX constructor, so the object is fully
+      hops_(0),            // initialized even before unpack() is called
+      transid_(0),
+      secs_(0),
+      flags_(0),
+      ciaddr_(DEFAULT_ADDRESS),
+      yiaddr_(DEFAULT_ADDRESS),
+      siaddr_(DEFAULT_ADDRESS),
+      giaddr_(DEFAULT_ADDRESS),
+      bufferIn_(data, len),
+      bufferOut_(0), // not used, this is RX packet
+      msg_type_(DHCPDISCOVER)
+{
+    if (len < DHCPV4_PKT_HDR_LEN) {
+        // note the leading space before "is expected" (message read
+        // "...236is expected" before)
+        isc_throw(OutOfRange, "Truncated DHCPv4 packet (len=" << len
+                  << " received, at least " << DHCPV4_PKT_HDR_LEN
+                  << " is expected");
+    }
+
+    // Zero the fixed-size buffers as the other constructor does;
+    // unpack() overwrites them with the received data.
+    memset(chaddr_, 0, MAX_CHADDR_LEN);
+    memset(sname_, 0, MAX_SNAME_LEN);
+    memset(file_, 0, MAX_FILE_LEN);
+}
+
+// Returns the number of bytes needed to serialize this packet: the
+// fixed DHCPv4 header plus the lengths of all options.
+size_t
+Pkt4::len() {
+    size_t length = DHCPV4_PKT_HDR_LEN; // DHCPv4 header
+
+    // ... and sum of lengths of all options
+    for (Option::OptionCollection::const_iterator it = options_.begin();
+         it != options_.end();
+         ++it) {
+        length += (*it).second->len();
+    }
+
+    return (length);
+}
+
+// Serializes the fixed DHCPv4 header, all options, and a terminating
+// END option into bufferOut_.
+//
+// @return true if packing was successful
+bool
+Pkt4::pack() {
+    bufferOut_.writeUint8(op_);
+    bufferOut_.writeUint8(htype_);
+    bufferOut_.writeUint8(hlen_);
+    bufferOut_.writeUint8(hops_);
+    bufferOut_.writeUint32(transid_);
+    bufferOut_.writeUint16(secs_);
+    bufferOut_.writeUint16(flags_);
+    // NOTE(review): IOAddress objects are passed directly to
+    // writeUint32() — presumably IOAddress converts to a uint32_t for
+    // v4 addresses; confirm against the IOAddress interface.
+    bufferOut_.writeUint32(ciaddr_);
+    bufferOut_.writeUint32(yiaddr_);
+    bufferOut_.writeUint32(siaddr_);
+    bufferOut_.writeUint32(giaddr_);
+    bufferOut_.writeData(chaddr_, MAX_CHADDR_LEN);
+    bufferOut_.writeData(sname_, MAX_SNAME_LEN);
+    bufferOut_.writeData(file_, MAX_FILE_LEN);
+
+    LibDHCP::packOptions(bufferOut_, options_);
+
+    // add END option that indicates end of options
+    // (End option is very simple, just a 255 octet)
+    bufferOut_.writeUint8(DHO_END);
+
+    return (true);
+}
+// Parses the on-wire form stored in bufferIn_: the fixed DHCPv4 header
+// first, then the options area.
+//
+// @throw OutOfRange if bufferIn_ is shorter than the fixed header
+// @return true if parsing was successful
+bool
+Pkt4::unpack() {
+    if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+        // note the leading space before "is expected" (message read
+        // "...236is expected" before)
+        isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
+                  << bufferIn_.getLength() << " received, at least "
+                  << DHCPV4_PKT_HDR_LEN << " is expected");
+    }
+
+    op_ = bufferIn_.readUint8();
+    htype_ = bufferIn_.readUint8();
+    hlen_ = bufferIn_.readUint8();
+    hops_ = bufferIn_.readUint8();
+    transid_ = bufferIn_.readUint32();
+    secs_ = bufferIn_.readUint16();
+    flags_ = bufferIn_.readUint16();
+    ciaddr_ = IOAddress(bufferIn_.readUint32());
+    yiaddr_ = IOAddress(bufferIn_.readUint32());
+    siaddr_ = IOAddress(bufferIn_.readUint32());
+    giaddr_ = IOAddress(bufferIn_.readUint32());
+    bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
+    bufferIn_.readData(sname_, MAX_SNAME_LEN);
+    bufferIn_.readData(file_, MAX_FILE_LEN);
+
+    size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+    vector<uint8_t> optsBuffer;
+    // first use of readVector
+    bufferIn_.readVector(optsBuffer, opts_len);
+    LibDHCP::unpackOptions4(optsBuffer, options_);
+
+    return (true);
+}
+
+// Returns a short, human-readable summary of this packet: addresses,
+// ports, message type and transaction id.  Intended for debugging.
+std::string
+Pkt4::toText() {
+    stringstream tmp;
+    tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
+        << " remoteAddr=[" << remote_addr_.toText()
+        << "]:" << remote_port_ << endl;
+    tmp << "msgtype=" << msg_type_
+        << ", transid=0x" << hex << transid_ << dec
+        << endl;
+
+    return tmp.str();
+}
+
+// Sets hardware address fields (htype, hlen, chaddr); unused chaddr
+// bytes are zeroed.
+//
+// @param hType hardware type
+// @param hlen hardware address length (must not exceed MAX_CHADDR_LEN)
+// @param macAddr hardware address bytes (at least hlen of them)
+//
+// @throw OutOfRange if hlen is too large, or macAddr is empty while
+//        hlen is non-zero
+void
+Pkt4::setHWAddr(uint8_t hType, uint8_t hlen,
+                const std::vector<uint8_t>& macAddr) {
+    /// TODO Rewrite this once support for client-identifier option
+    /// is implemented (ticket 1228?)
+    if (hlen>MAX_CHADDR_LEN) {
+        isc_throw(OutOfRange, "Hardware address (len=" << hlen
+                  << ") too long. Max " << MAX_CHADDR_LEN << " supported.");
+    }
+    if ( (macAddr.size() == 0) && (hlen > 0) ) {
+        isc_throw(OutOfRange, "Invalid HW Address specified");
+    }
+
+    htype_ = hType;
+    hlen_ = hlen;
+    memset(chaddr_, 0, MAX_CHADDR_LEN);
+    // Guard the copy: taking &macAddr[0] on an empty vector is undefined
+    // behavior, and hlen == 0 legitimately means "no hardware address".
+    if (hlen > 0) {
+        memcpy(chaddr_, &macAddr[0], hlen);
+    }
+}
+
+// Copies up to MAX_SNAME_LEN bytes into the sname field, zero-padding
+// the remainder.
+// NOTE(review): assumes sname is non-NULL when snameLen > 0 — confirm
+// callers, as memcpy from NULL would be undefined behavior.
+void
+Pkt4::setSname(const uint8_t* sname, size_t snameLen /*= MAX_SNAME_LEN*/) {
+    if (snameLen > MAX_SNAME_LEN) {
+        isc_throw(OutOfRange, "sname field (len=" << snameLen
+                  << ") too long, Max " << MAX_SNAME_LEN << " supported.");
+    }
+    memset(sname_, 0, MAX_SNAME_LEN);
+    memcpy(sname_, sname, snameLen);
+
+    // no need to store snameLen as any empty space is filled with 0s
+}
+
+// Copies up to MAX_FILE_LEN bytes into the file field, zero-padding
+// the remainder.
+// NOTE(review): assumes file is non-NULL when fileLen > 0 — confirm
+// callers, as memcpy from NULL would be undefined behavior.
+void
+Pkt4::setFile(const uint8_t* file, size_t fileLen /*= MAX_FILE_LEN*/) {
+    if (fileLen > MAX_FILE_LEN) {
+        isc_throw(OutOfRange, "file field (len=" << fileLen
+                  << ") too long, Max " << MAX_FILE_LEN << " supported.");
+    }
+    memset(file_, 0, MAX_FILE_LEN);
+    memcpy(file_, file, fileLen);
+
+    // no need to store fileLen as any empty space is filled with 0s
+}
+
+// Maps a DHCP message type to the corresponding BOOTP op field value:
+// client-originated types -> BOOTREQUEST, server-originated types ->
+// BOOTREPLY.  Throws OutOfRange for any unrecognized type.
+uint8_t
+Pkt4::DHCPTypeToBootpType(uint8_t dhcpType) {
+    switch (dhcpType) {
+    case DHCPDISCOVER:
+    case DHCPREQUEST:
+    case DHCPDECLINE:
+    case DHCPRELEASE:
+    case DHCPINFORM:
+    case DHCPLEASEQUERY:
+        return (BOOTREQUEST);
+    case DHCPACK:
+    case DHCPNAK:
+    case DHCPOFFER:
+    case DHCPLEASEUNASSIGNED:
+    case DHCPLEASEUNKNOWN:
+    case DHCPLEASEACTIVE:
+        return (BOOTREPLY);
+    default:
+        isc_throw(OutOfRange, "Invalid message type: "
+                  << static_cast<int>(dhcpType) );
+    }
+}
+
+// Adds an option to this packet.  Throws BadValue if an option of the
+// same type is already present (DHCPv4 options must be unique).
+void
+Pkt4::addOption(boost::shared_ptr<Option> opt) {
+    // check for uniqueness (DHCPv4 options must be unique)
+    if (getOption(opt->getType())) {
+        isc_throw(BadValue, "Option " << opt->getType()
+                  << " already present in this message.");
+    }
+    options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+// Returns the option of the given type, or an empty (NULL) shared_ptr
+// if this packet does not contain it.
+boost::shared_ptr<isc::dhcp::Option>
+Pkt4::getOption(uint8_t type) {
+    Option::OptionCollection::const_iterator x = options_.find(type);
+    if (x!=options_.end()) {
+        return (*x).second;
+    }
+    return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+
+} // end of namespace isc::dhcp
+
+} // end of namespace isc
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
new file mode 100644
index 0000000..8517091
--- /dev/null
+++ b/src/lib/dhcp/pkt4.h
@@ -0,0 +1,409 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT4_H
+#define PKT4_H
+
+#include <iostream>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include "asiolink/io_address.h"
+#include "util/buffer.h"
+#include "dhcp/option.h"
+
+namespace isc {
+
+namespace dhcp {
+
+class Pkt4 {
+public:
+
+    /// length of the CHADDR field in DHCPv4 message
+    const static size_t MAX_CHADDR_LEN = 16;
+
+    /// length of the SNAME field in DHCPv4 message
+    const static size_t MAX_SNAME_LEN = 64;
+
+    /// length of the FILE field in DHCPv4 message
+    const static size_t MAX_FILE_LEN = 128;
+
+    /// specifies DHCPv4 packet header length (fixed part)
+    const static size_t DHCPV4_PKT_HDR_LEN = 236;
+
+    /// Constructor, used in replying to a message.
+    ///
+    /// @param msg_type type of message (e.g. DHCPDISOVER=1)
+    /// @param transid transaction-id
+    Pkt4(uint8_t msg_type, uint32_t transid);
+
+    /// @brief Constructor, used in message reception.
+    ///
+    /// Creates new message. Pkt4 will copy data to bufferIn_
+    /// buffer on creation.
+    ///
+    /// @param data pointer to received data
+    /// @param len size of buffer to be allocated for this packet.
+    Pkt4(const uint8_t* data, size_t len);
+
+    /// @brief Prepares on-wire format of DHCPv4 packet.
+    ///
+    /// Prepares on-wire format of message and all its options.
+    /// Options must be stored in options_ field.
+    /// Output buffer will be stored in bufferOut_.
+    ///
+    /// @return true if packing procedure was successful
+    bool
+    pack();
+
+    /// @brief Parses on-wire form of DHCPv4 packet.
+    ///
+    /// Parses received packet, stored in on-wire format in bufferIn_.
+    ///
+    /// Will create a collection of option objects that will
+    /// be stored in options_ container.
+    ///
+    /// @return true, if parsing was successful
+    bool
+    unpack();
+
+    /// @brief Returns text representation of the packet.
+    ///
+    /// This function is useful mainly for debugging.
+    ///
+    /// @return string with text representation
+    std::string
+    toText();
+
+    /// @brief Returns the size of the required buffer to build the packet.
+    ///
+    /// Returns the size of the required buffer to build the packet with
+    /// the current set of packet options.
+    ///
+    /// @return number of bytes required to build this packet
+    size_t
+    len();
+
+    /// Sets hops field
+    ///
+    /// @param hops value to be set
+    void
+    setHops(uint8_t hops) { hops_ = hops; };
+
+    /// Returns hops field
+    ///
+    /// @return hops field
+    uint8_t
+    getHops() const { return (hops_); };
+
+    // Note: There's no need to manipulate OP field directly,
+    // thus no setOp() method. See op_ comment.
+
+    /// Returns op field
+    ///
+    /// @return op field
+    uint8_t
+    getOp() const { return (op_); };
+
+    /// Sets secs field
+    ///
+    /// @param secs value to be set
+    void
+    setSecs(uint16_t secs) { secs_ = secs; };
+
+    /// Returns secs field
+    ///
+    /// @return secs field
+    uint16_t
+    getSecs() const { return (secs_); };
+
+    /// Sets flags field
+    ///
+    /// @param flags value to be set
+    void
+    setFlags(uint16_t flags) { flags_ = flags; };
+
+    /// Returns flags field
+    ///
+    /// @return flags field
+    uint16_t
+    getFlags() const { return (flags_); };
+
+
+    /// Returns ciaddr field
+    ///
+    /// @return ciaddr field
+    const isc::asiolink::IOAddress&
+    getCiaddr() const { return (ciaddr_); };
+
+    /// Sets ciaddr field
+    ///
+    /// @param ciaddr value to be set
+    void
+    setCiaddr(const isc::asiolink::IOAddress& ciaddr) { ciaddr_ = ciaddr; };
+
+
+    /// Returns siaddr field
+    ///
+    /// @return siaddr field
+    const isc::asiolink::IOAddress&
+    getSiaddr() const { return (siaddr_); };
+
+    /// Sets siaddr field
+    ///
+    /// @param siaddr value to be set
+    void
+    setSiaddr(const isc::asiolink::IOAddress& siaddr) { siaddr_ = siaddr; };
+
+
+    /// Returns yiaddr field
+    ///
+    /// @return yiaddr field
+    const isc::asiolink::IOAddress&
+    getYiaddr() const { return (yiaddr_); };
+
+    /// Sets yiaddr field
+    ///
+    /// @param yiaddr value to be set
+    void
+    setYiaddr(const isc::asiolink::IOAddress& yiaddr) { yiaddr_ = yiaddr; };
+
+
+    /// Returns giaddr field
+    ///
+    /// @return giaddr field
+    const isc::asiolink::IOAddress&
+    getGiaddr() const { return (giaddr_); };
+
+    /// Sets giaddr field
+    ///
+    /// @param giaddr value to be set
+    void
+    setGiaddr(const isc::asiolink::IOAddress& giaddr) { giaddr_ = giaddr; };
+
+    /// Returns value of transaction-id field
+    ///
+    /// @return transaction-id
+    uint32_t getTransid() const { return (transid_); };
+
+    /// Returns message type (e.g. 1 = DHCPDISCOVER)
+    ///
+    /// @return message type
+    uint8_t
+    getType() const { return (msg_type_); }
+
+    /// Sets message type (e.g. 1 = DHCPDISCOVER)
+    ///
+    /// @param type message type to be set
+    void setType(uint8_t type) { msg_type_=type; };
+
+    /// @brief Returns sname field
+    ///
+    /// Note: This is 64 bytes long field. It doesn't have to be
+    /// null-terminated. Do not use strlen() or similar on it.
+    ///
+    /// @return sname field
+    const std::vector<uint8_t>
+    getSname() const { return (std::vector<uint8_t>(sname_, &sname_[MAX_SNAME_LEN])); };
+
+    /// Sets sname field
+    ///
+    /// @param sname value to be set
+    void
+    setSname(const uint8_t* sname, size_t snameLen = MAX_SNAME_LEN);
+
+    /// @brief Returns file field
+    ///
+    /// Note: This is 128 bytes long field. It doesn't have to be
+    /// null-terminated. Do not use strlen() or similar on it.
+    ///
+    /// @return pointer to file field
+    const std::vector<uint8_t>
+    getFile() const { return (std::vector<uint8_t>(file_, &file_[MAX_FILE_LEN])); };
+
+    /// Sets file field
+    ///
+    /// @param file value to be set
+    void
+    setFile(const uint8_t* file, size_t fileLen = MAX_FILE_LEN);
+
+    /// @brief Sets hardware address.
+    ///
+    /// Sets parameters of hardware address. hlen specifies
+    /// length of macAddr buffer. Content of macAddr buffer
+    /// will be copied to appropriate field.
+    ///
+    /// Note: macAddr must be a buffer of at least hlen bytes.
+    ///
+    /// @param hType hardware type (will be sent in htype field)
+    /// @param hlen hardware length (will be sent in hlen field)
+    /// @param macAddr pointer to hardware address
+    void setHWAddr(uint8_t hType, uint8_t hlen,
+                   const std::vector<uint8_t>& macAddr);
+
+    /// Returns htype field
+    ///
+    /// @return hardware type
+    uint8_t
+    getHtype() const { return (htype_); };
+
+    /// Returns hlen field
+    ///
+    /// @return hardware address length
+    uint8_t
+    getHlen() const { return (hlen_); };
+
+    /// @brief Returns chaddr field.
+    ///
+    /// Note: This is 16 bytes long field. It doesn't have to be
+    /// null-terminated. Do not use strlen() or similar on it.
+    ///
+    /// @return pointer to hardware address
+    const uint8_t*
+    getChaddr() const { return (chaddr_); };
+
+
+    /// @brief Returns reference to output buffer.
+    ///
+    /// Returned buffer will contain reasonable data only for
+    /// output (TX) packet and after pack() was called. This buffer
+    /// is only valid till Pkt4 object is valid.
+    ///
+    /// RX packet or TX packet before pack() will return buffer with
+    /// zero length
+    ///
+    /// @return reference to output buffer
+    const isc::util::OutputBuffer&
+    getBuffer() const { return (bufferOut_); };
+
+    /// @brief Add an option.
+    ///
+    /// Throws BadValue if option with that type is already present.
+    ///
+    /// @param opt option to be added
+    void
+    addOption(boost::shared_ptr<Option> opt);
+
+    /// @brief Returns an option of specified type.
+    ///
+    /// @return returns option of requested type (or NULL)
+    ///         if no such option is present
+
+    boost::shared_ptr<Option>
+    getOption(uint8_t opt_type);
+
+protected:
+
+    /// converts DHCP message type to BOOTP op type
+    ///
+    /// @param dhcpType DHCP message type (e.g. DHCPDISCOVER)
+    ///
+    /// @return BOOTP type (BOOTREQUEST or BOOTREPLY)
+    uint8_t
+    DHCPTypeToBootpType(uint8_t dhcpType);
+
+    /// local address (dst if receiving packet, src if sending packet)
+    isc::asiolink::IOAddress local_addr_;
+
+    /// remote address (src if receiving packet, dst if sending packet)
+    isc::asiolink::IOAddress remote_addr_;
+
+    /// name of the network interface the packet was received/to be sent over
+    std::string iface_;
+
+    /// @brief interface index
+    ///
+    /// Each network interface has a unique ifindex assigned. It is a
+    /// functional equivalent of the name, but sometimes more useful, e.g.
+    /// on systems that allow spaces in interface names (e.g. MS Windows).
+    int ifindex_;
+
+    /// local UDP port
+    int local_port_;
+
+    /// remote UDP port
+    int remote_port_;
+
+    /// @brief message operation code
+    ///
+    /// Note: This is legacy BOOTP field. There's no need to manipulate it
+    /// directly. Its value is set based on DHCP message type. Note that
+    /// DHCPv4 protocol reuses BOOTP message format, so this field is
+    /// kept due to BOOTP format. This is NOT DHCPv4 type (DHCPv4 message
+    /// type is kept in message type option).
+    uint8_t op_;
+
+    /// link-layer address type
+    uint8_t htype_;
+
+    /// link-layer address length
+    uint8_t hlen_;
+
+    /// Number of relay agents traversed
+    uint8_t hops_;
+
+    /// DHCPv4 transaction-id (32 bits, not 24 bits as in DHCPv6)
+    uint32_t transid_;
+
+    /// elapsed (number of seconds since beginning of transmission)
+    uint16_t secs_;
+
+    /// flags
+    uint16_t flags_;
+
+    /// ciaddr field (32 bits): Client's IP address
+    isc::asiolink::IOAddress ciaddr_;
+
+    /// yiaddr field (32 bits): Client's IP address ("your"), set by server
+    isc::asiolink::IOAddress yiaddr_;
+
+    /// siaddr field (32 bits): next server IP address in boot process(e.g.TFTP)
+    isc::asiolink::IOAddress siaddr_;
+
+    /// giaddr field (32 bits): Gateway IP address
+    isc::asiolink::IOAddress giaddr_;
+
+    /// Hardware address field (16 bytes)
+    uint8_t chaddr_[MAX_CHADDR_LEN];
+
+    /// sname field (64 bytes)
+    uint8_t sname_[MAX_SNAME_LEN];
+
+    /// file field (128 bytes)
+    uint8_t file_[MAX_FILE_LEN];
+
+    // end of real DHCPv4 fields
+
+    /// input buffer (used during message reception)
+    /// Note that it may need to be modifiable, as hooks can modify the
+    /// incoming buffer (the declared type is InputBuffer; revisit if needed)
+    isc::util::InputBuffer bufferIn_;
+
+    /// output buffer (used during message transmission)
+    isc::util::OutputBuffer bufferOut_;
+
+    /// message type (e.g. 1=DHCPDISCOVER)
+    /// TODO: this will eventually be replaced with DHCP Message Type
+    /// option (option 53)
+    uint8_t msg_type_;
+
+    /// collection of options present in this message
+    isc::dhcp::Option::OptionCollection options_;
+}; // Pkt4 class
+
+} // isc::dhcp namespace
+
+} // isc namespace
+
+#endif
diff --git a/src/lib/dhcp/pkt6.cc b/src/lib/dhcp/pkt6.cc
new file mode 100644
index 0000000..84c5729
--- /dev/null
+++ b/src/lib/dhcp/pkt6.cc
@@ -0,0 +1,232 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/pkt6.h"
+#include "dhcp/libdhcp.h"
+#include "exceptions/exceptions.h"
+#include <iostream>
+#include <sstream>
+
+using namespace std;
+using namespace isc::dhcp;
+
+namespace isc {
+
+Pkt6::Pkt6(unsigned int dataLen, DHCPv6Proto proto /* = UDP */)
+    :data_len_(dataLen),
+     local_addr_("::"),
+     remote_addr_("::"),
+     iface_(""),
+     ifindex_(-1),
+     local_port_(-1),
+     remote_port_(-1),
+     proto_(proto),
+     msg_type_(-1),
+     transid_(rand()%0xffffff)
+{
+
+    data_ = boost::shared_array<uint8_t>(new uint8_t[dataLen]);
+    data_len_ = dataLen;
+}
+
+Pkt6::Pkt6(uint8_t msg_type,
+           unsigned int transid,
+           DHCPv6Proto proto /*= UDP*/)
+    :local_addr_("::"),
+     remote_addr_("::"),
+     iface_(""),
+     ifindex_(-1),
+     local_port_(-1),
+     remote_port_(-1),
+     proto_(proto),
+     msg_type_(msg_type),
+     transid_(transid) {
+
+    data_ = boost::shared_array<uint8_t>(new uint8_t[4]);
+    data_len_ = 4;
+}
+
+unsigned short
+Pkt6::len() {
+    unsigned int length = DHCPV6_PKT_HDR_LEN; // DHCPv6 header
+
+    for (Option::OptionCollection::iterator it = options_.begin();
+         it != options_.end();
+         ++it) {
+        length += (*it).second->len();
+    }
+
+    return (length);
+}
+
+
+bool
+Pkt6::pack() {
+    switch (proto_) {
+    case UDP:
+        return packUDP();
+    case TCP:
+        return packTCP();
+    default:
+        isc_throw(BadValue, "Invalid protocol specified (non-TCP, non-UDP)");
+    }
+    return (false); // never happens
+}
+
+bool
+Pkt6::packUDP() {
+
+    // TODO: Once OutputBuffer is used here, some thing like this
+    // will be used. Yikes! That's ugly.
+    // bufferOut_.writeData(ciaddr_.getAddress().to_v6().to_bytes().data(), 16);
+    // It is better to implement a method in IOAddress that extracts
+    // vector<uint8_t>
+
+    unsigned short length = len();
+    if (data_len_ < length) {
+        cout << "Previous len=" << data_len_ << ", allocating new buffer: len="
+             << length << endl;
+
+        // May throw exception if out of memory. That is rather fatal,
+        // so we don't catch this
+        data_ = boost::shared_array<uint8_t>(new uint8_t[length]);
+        data_len_ = length;
+    }
+
+    data_len_ = length;
+    try {
+        // DHCPv6 header: message-type (1 octet) + transaction id (3 octets)
+        data_[0] = msg_type_;
+
+        // store 3-octet transaction-id
+        data_[1] = (transid_ >> 16) & 0xff;
+        data_[2] = (transid_ >> 8) & 0xff;
+        data_[3] = (transid_) & 0xff;
+
+        // the rest are options
+        unsigned short offset = LibDHCP::packOptions6(data_, length,
+                                                      4/*offset*/,
+                                                      options_);
+
+        // sanity check
+        if (offset != length) {
+            isc_throw(OutOfRange, "Packet build failed: expected size="
+                      << length << ", actual len=" << offset);
+        }
+    }
+    catch (const Exception& e) {
+        cout << "Packet build failed:" << e.what() << endl;
+        return (false);
+    }
+    // Limited verbosity of this method
+    // cout << "Packet built, len=" << len() << endl;
+    return (true);
+}
+
+bool
+Pkt6::packTCP() {
+    /// TODO Implement this function.
+    isc_throw(Unexpected, "DHCPv6 over TCP (bulk leasequery and failover) "
+              "not implemented yet.");
+}
+
+bool
+Pkt6::unpack() {
+    switch (proto_) {
+    case UDP:
+        return unpackUDP();
+    case TCP:
+        return unpackTCP();
+    default:
+        isc_throw(BadValue, "Invalid protocol specified (non-TCP, non-UDP)");
+    }
+    return (false); // never happens
+}
+
+bool
+Pkt6::unpackUDP() {
+    if (data_len_ < 4) {
+        std::cout << "DHCPv6 packet truncated. Only " << data_len_
+                  << " bytes. Need at least 4." << std::endl;
+        return (false);
+    }
+    msg_type_ = data_[0];
+    transid_ = ( (data_[1]) << 16 ) +
+        ((data_[2]) << 8) + (data_[3]);
+    transid_ = transid_ & 0xffffff;
+
+    unsigned int offset = LibDHCP::unpackOptions6(data_,
+                                                  data_len_,
+                                                  4, //offset
+                                                  data_len_ - 4,
+                                                  options_);
+    if (offset != data_len_) {
+        cout << "DHCPv6 packet contains trailing garbage. Parsed "
+             << offset << " bytes, packet is " << data_len_ << " bytes."
+             << endl;
+        // just a warning. Ignore trailing garbage and continue
+    }
+    return (true);
+}
+
+bool
+Pkt6::unpackTCP() {
+    isc_throw(Unexpected, "DHCPv6 over TCP (bulk leasequery and failover) "
+              "not implemented yet.");
+}
+
+
+std::string
+Pkt6::toText() {
+    stringstream tmp;
+    tmp << "localAddr=[" << local_addr_.toText() << "]:" << local_port_
+        << " remoteAddr=[" << remote_addr_.toText()
+        << "]:" << remote_port_ << endl;
+    tmp << "msgtype=" << msg_type_ << ", transid=0x" << hex << transid_
+        << dec << endl;
+    for (isc::dhcp::Option::OptionCollection::iterator opt=options_.begin();
+         opt != options_.end();
+         ++opt) {
+        tmp << opt->second->toText() << std::endl;
+    }
+    return tmp.str();
+}
+
+boost::shared_ptr<isc::dhcp::Option>
+Pkt6::getOption(unsigned short opt_type) {
+    isc::dhcp::Option::OptionCollection::const_iterator x = options_.find(opt_type);
+    if (x!=options_.end()) {
+        return (*x).second;
+    }
+    return boost::shared_ptr<isc::dhcp::Option>(); // NULL
+}
+
+void
+Pkt6::addOption(boost::shared_ptr<Option> opt) {
+    options_.insert(pair<int, boost::shared_ptr<Option> >(opt->getType(), opt));
+}
+
+bool
+Pkt6::delOption(unsigned short type) {
+    isc::dhcp::Option::OptionCollection::iterator x = options_.find(type);
+    if (x!=options_.end()) {
+        options_.erase(x);
+        return (true); // delete successful
+    }
+    return (false); // can't find option to be deleted
+}
+
+};
diff --git a/src/lib/dhcp/pkt6.h b/src/lib/dhcp/pkt6.h
new file mode 100644
index 0000000..019eeb2
--- /dev/null
+++ b/src/lib/dhcp/pkt6.h
@@ -0,0 +1,234 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef PKT6_H
+#define PKT6_H
+
+#include <iostream>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include "asiolink/io_address.h"
+#include "dhcp/option.h"
+
+namespace isc {
+
+namespace dhcp {
+
+class Pkt6 {
+public:
+    /// specifies DHCPv6 packet header length
+    const static size_t DHCPV6_PKT_HDR_LEN = 4;
+
+    /// DHCPv6 transport protocol
+    enum DHCPv6Proto {
+        UDP = 0, // most packets are UDP
+        TCP = 1  // there are TCP DHCPv6 packets (bulk leasequery, failover)
+    };
+
+    /// Constructor, used in replying to a message
+    ///
+    /// @param msg_type type of message (SOLICIT=1, ADVERTISE=2, ...)
+    /// @param transid transaction-id
+    /// @param proto protocol (TCP or UDP)
+    Pkt6(unsigned char msg_type,
+         unsigned int transid,
+         DHCPv6Proto proto = UDP);
+
+    /// Constructor, used in message transmission
+    ///
+    /// Creates new message. Transaction-id will be randomized.
+    ///
+    /// @param len size of buffer to be allocated for this packet.
+    /// @param proto protocol (usually UDP, but TCP will be supported eventually)
+    Pkt6(unsigned int len, DHCPv6Proto proto = UDP);
+
+    /// @brief Prepares on-wire format.
+    ///
+    /// Prepares on-wire format of message and all its options.
+    /// Options must be stored in options_ field.
+    /// Output buffer will be stored in data_. Length
+    /// will be set in data_len_.
+    ///
+    /// @return true if packing procedure was successful
+    bool
+    pack();
+
+    /// @brief Dispatch method that handles binary packet parsing.
+    ///
+    /// This method calls appropriate dispatch function (unpackUDP or
+    /// unpackTCP).
+    ///
+    /// @return true if parsing was successful
+    bool
+    unpack();
+
+    /// Returns protocol of this packet (UDP or TCP)
+    ///
+    /// @return protocol type
+    DHCPv6Proto
+    getProto();
+
+    /// Sets protocol of this packet.
+    ///
+    /// @param proto protocol (UDP or TCP)
+    ///
+    void
+    setProto(DHCPv6Proto proto = UDP) { proto_ = proto; }
+
+    /// @brief Returns text representation of the packet.
+    ///
+    /// This function is useful mainly for debugging.
+    ///
+    /// @return string with text representation
+    std::string
+    toText();
+
+    /// @brief Returns calculated length of the packet.
+    ///
+    /// This function returns the size of the buffer required to build this packet.
+    /// To use that function, options_ field must be set.
+    ///
+    /// @return number of bytes required to build this packet
+    unsigned short
+    len();
+
+    /// Returns message type (e.g. 1 = SOLICIT)
+    ///
+    /// @return message type
+    unsigned char
+    getType() { return (msg_type_); }
+
+    /// Sets message type (e.g. 1 = SOLICIT)
+    ///
+    /// @param type message type to be set
+    void setType(unsigned char type) { msg_type_=type; };
+
+    /// Returns value of transaction-id field
+    ///
+    /// @return transaction-id
+    unsigned int getTransid() { return (transid_); };
+
+    /// Adds an option to this packet.
+    ///
+    /// @param opt option to be added.
+    void addOption(boost::shared_ptr<isc::dhcp::Option> opt);
+
+    /// @brief Returns the first option of specified type.
+    ///
+    /// Returns the first option of specified type. Note that in DHCPv6 several
+    /// instances of the same option are allowed (and frequently used).
+    /// See getOptions().
+    ///
+    /// @param type option type we are looking for
+    ///
+    /// @return pointer to found option (or NULL)
+    boost::shared_ptr<isc::dhcp::Option>
+    getOption(unsigned short type);
+
+    /// Attempts to delete first suboption of requested type
+    ///
+    /// @param type Type of option to be deleted.
+    ///
+    /// @return true if option was deleted, false if no such option existed
+    bool
+    delOption(unsigned short type);
+
+    /// TODO need getter/setter wrappers
+    ///      and hide following fields as protected
+
+    /// buffer that holds memory. It is shared_array as options may
+    /// share pointer to this buffer
+    boost::shared_array<uint8_t> data_;
+
+    /// length of the data
+    unsigned int data_len_;
+
+    /// local address (dst if receiving packet, src if sending packet)
+    isc::asiolink::IOAddress local_addr_;
+
+    /// remote address (src if receiving packet, dst if sending packet)
+    isc::asiolink::IOAddress remote_addr_;
+
+    /// name of the network interface the packet was received/to be sent over
+    std::string iface_;
+
+    /// @brief interface index
+    ///
+    /// interface index (each network interface has a unique ifindex
+    /// assigned; it is a functional equivalent of the name, but sometimes
+    /// more useful, e.g. on systems that allow spaces in interface names,
+    /// e.g. MS Windows)
+    int ifindex_;
+
+    /// local TCP or UDP port
+    int local_port_;
+
+    /// remote TCP or UDP port
+    int remote_port_;
+
+    /// TODO Need to implement getOptions() as well
+
+    /// collection of options present in this message
+    isc::dhcp::Option::OptionCollection options_;
+
+protected:
+    /// Builds on wire packet for TCP transmission.
+    ///
+    /// TODO This function is not implemented yet.
+    ///
+    /// @return true, if build was successful
+    bool packTCP();
+
+    /// Builds on wire packet for UDP transmission.
+    ///
+    /// @return true, if build was successful
+    bool packUDP();
+
+    /// @brief Parses on-wire form of TCP DHCPv6 packet.
+    ///
+    /// Parses received packet, stored in on-wire format in data_.
+    /// data_len_ must be set to indicate data length.
+    /// Will create a collection of option objects that will
+    /// be stored in options_ container.
+    ///
+    /// TODO This function is not implemented yet.
+    ///
+    /// @return true, if build was successful
+    bool unpackTCP();
+
+    /// @brief Parses on-wire form of UDP DHCPv6 packet.
+    ///
+    /// Parses received packet, stored in on-wire format in data_.
+    /// data_len_ must be set to indicate data length.
+    /// Will create a collection of option objects that will
+    /// be stored in options_ container.
+    ///
+    /// @return true, if build was successful
+    bool unpackUDP();
+
+    /// UDP (usually) or TCP (bulk leasequery or failover)
+    DHCPv6Proto proto_;
+
+    /// DHCPv6 message type
+    int msg_type_;
+
+    /// DHCPv6 transaction-id
+    unsigned int transid_;
+}; // Pkt6 class
+
+} // isc::dhcp namespace
+
+} // isc namespace
+
+#endif
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
new file mode 100644
index 0000000..176992f
--- /dev/null
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -0,0 +1,42 @@
+SUBDIRS = .
+
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += libdhcp_unittests
+libdhcp_unittests_SOURCES  = run_unittests.cc
+libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
+libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option4_addrlst.cc ../option4_addrlst.h option4_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
+libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
+libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
+
+libdhcp_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES) $(LOG4CPLUS_INCLUDES)
+libdhcp_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+
+libdhcp_unittests_CXXFLAGS = $(AM_CXXFLAGS)
+if USE_CLANGPP
+# This is to workaround unused variables tcout and tcerr in
+# log4cplus's streams.h.
+libdhcp_unittests_CXXFLAGS += -Wno-unused-variable
+endif
+libdhcp_unittests_LDADD  = $(GTEST_LDADD)
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
+libdhcp_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/dhcp/tests/libdhcp_unittest.cc b/src/lib/dhcp/tests/libdhcp_unittest.cc
new file mode 100644
index 0000000..11b618c
--- /dev/null
+++ b/src/lib/dhcp/tests/libdhcp_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <util/buffer.h>
+#include <dhcp/libdhcp.h>
+#include "config.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+namespace {
+class LibDhcpTest : public ::testing::Test {
+public:
+    LibDhcpTest() {
+    }
+};
+
+static const uint8_t packed[] = {
+    0, 12, 0, 5, 100, 101, 102, 103, 104, // opt1 (9 bytes)
+    0, 13, 0, 3, 105, 106, 107, // opt2 (7 bytes)
+    0, 14, 0, 2, 108, 109, // opt3 (6 bytes)
+    1,  0, 0, 4, 110, 111, 112, 113, // opt4 (8 bytes)
+    1,  1, 0, 1, 114 // opt5 (5 bytes)
+};
+
+TEST(LibDhcpTest, packOptions6) {
+    boost::shared_array<uint8_t> buf(new uint8_t[512]);
+    isc::dhcp::Option::OptionCollection opts; // list of options
+
+    // generate content for options
+    for (int i = 0; i < 64; i++) {
+        buf[i]=i+100;
+    }
+
+    boost::shared_ptr<Option> opt1(new Option(Option::V6, 12, buf, 0, 5));
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 13, buf, 5, 3));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 14, buf, 8, 2));
+    boost::shared_ptr<Option> opt4(new Option(Option::V6,256, buf,10, 4));
+    boost::shared_ptr<Option> opt5(new Option(Option::V6,257, buf,14, 1));
+
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+    unsigned int offset;
+    EXPECT_NO_THROW ({
+         offset = LibDHCP::packOptions6(buf, 512, 100, opts);
+    });
+    EXPECT_EQ(135, offset); // options should take 35 bytes
+    EXPECT_EQ(0, memcmp(&buf[100], packed, 35) );
+}
+
+TEST(LibDhcpTest, unpackOptions6) {
+
+    // just couple of random options
+    // Option is used as a simple option implementation
+    // More advanced uses are validated in tests dedicated for
+    // specific derived classes.
+    isc::dhcp::Option::OptionCollection options; // list of options
+
+    // we can't use packed directly, as shared_array would try to
+    // free it eventually
+    boost::shared_array<uint8_t> buf(new uint8_t[512]);
+    memcpy(&buf[0], packed, 35);
+
+    unsigned int offset;
+    EXPECT_NO_THROW ({
+        offset = LibDHCP::unpackOptions6(buf, 512, 0, 35, options);
+    });
+
+    EXPECT_EQ(35, offset); // parsed first 35 bytes (offset 0..34)
+    EXPECT_EQ(options.size(), 5); // there should be 5 options
+
+    isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+    ASSERT_FALSE(x == options.end()); // option 1 should exist
+    EXPECT_EQ(12, x->second->getType());  // this should be option 12
+    ASSERT_EQ(9, x->second->len()); // it should be of length 9
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+4, 5)); // data len=5
+
+    x = options.find(13);
+    ASSERT_FALSE(x == options.end()); // option 13 should exist
+    EXPECT_EQ(13, x->second->getType());  // this should be option 13
+    ASSERT_EQ(7, x->second->len()); // it should be of length 7
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+13, 3)); // data len=3
+
+    x = options.find(14);
+    ASSERT_FALSE(x == options.end()); // option 3 should exist
+    EXPECT_EQ(14, x->second->getType());  // this should be option 14
+    ASSERT_EQ(6, x->second->len()); // it should be of length 6
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+20, 2)); // data len=2
+
+    x = options.find(256);
+    ASSERT_FALSE(x == options.end()); // option 256 should exist
+    EXPECT_EQ(256, x->second->getType());  // this should be option 256
+    ASSERT_EQ(8, x->second->len()); // it should be of length 8
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+26, 4)); // data len=4
+
+    x = options.find(257);
+    ASSERT_FALSE(x == options.end()); // option 257 should exist
+    EXPECT_EQ(257, x->second->getType());  // this should be option 257
+    ASSERT_EQ(5, x->second->len()); // it should be of length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], packed+34, 1)); // data len=1
+
+    x = options.find(0);
+    EXPECT_TRUE(x == options.end()); // option 0 not found
+
+    x = options.find(1); // 1 is htons(256) on little endians. Worth checking
+    EXPECT_TRUE(x == options.end()); // option 1 not found
+
+    x = options.find(2);
+    EXPECT_TRUE(x == options.end()); // option 2 not found
+
+    x = options.find(32000);
+    EXPECT_TRUE(x == options.end()); // option 32000 not found
+}
+
+
+static uint8_t v4Opts[] = {
+    12,  3, 0,   1,  2,
+    13,  3, 10, 11, 12,
+    14,  3, 20, 21, 22,
+    254, 3, 30, 31, 32,
+    128, 3, 40, 41, 42
+};
+
+TEST(LibDhcpTest, packOptions4) {
+
+    vector<uint8_t> payload[5];
+    for (int i = 0; i < 5; i++) {
+        payload[i].resize(3);
+        payload[i][0] = i*10;
+        payload[i][1] = i*10+1;
+        payload[i][2] = i*10+2;
+    }
+
+    boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+    boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+    boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+    boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[3]));
+    boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[4]));
+
+    isc::dhcp::Option::OptionCollection opts; // list of options
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt1));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt2));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt3));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt4));
+    opts.insert(pair<int, boost::shared_ptr<Option> >(opt1->getType(), opt5));
+
+    vector<uint8_t> expVect(v4Opts, v4Opts + sizeof(v4Opts));
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW (
+        LibDHCP::packOptions(buf, opts);
+    );
+    ASSERT_EQ(buf.getLength(), sizeof(v4Opts));
+    EXPECT_EQ(0, memcmp(v4Opts, buf.getData(), sizeof(v4Opts)));
+
+}
+
+TEST(LibDhcpTest, unpackOptions4) {
+
+    vector<uint8_t> packed(v4Opts, v4Opts + sizeof(v4Opts));
+    isc::dhcp::Option::OptionCollection options; // list of options
+
+    ASSERT_NO_THROW(
+        LibDHCP::unpackOptions4(packed, options);
+    );
+
+    isc::dhcp::Option::OptionCollection::const_iterator x = options.find(12);
+    ASSERT_FALSE(x == options.end()); // option 1 should exist
+    EXPECT_EQ(12, x->second->getType());  // this should be option 12
+    ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->second->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+2, 3)); // data len=3
+
+    x = options.find(13);
+    ASSERT_FALSE(x == options.end()); // option 1 should exist
+    EXPECT_EQ(13, x->second->getType());  // this should be option 13
+    ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->second->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+7, 3)); // data len=3
+
+    x = options.find(14);
+    ASSERT_FALSE(x == options.end()); // option 3 should exist
+    EXPECT_EQ(14, x->second->getType());  // this should be option 14
+    ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->second->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+12, 3)); // data len=3
+
+    x = options.find(254);
+    ASSERT_FALSE(x == options.end()); // option 3 should exist
+    EXPECT_EQ(254, x->second->getType());  // this should be option 254
+    ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->second->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+17, 3)); // data len=3
+
+    x = options.find(128);
+    ASSERT_FALSE(x == options.end()); // option 3 should exist
+    EXPECT_EQ(128, x->second->getType());  // this should be option 128
+    ASSERT_EQ(3, x->second->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->second->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->second->getData()[0], v4Opts+22, 3)); // data len=3
+
+    x = options.find(0);
+    EXPECT_TRUE(x == options.end()); // option 0 not found
+
+    x = options.find(1);
+    EXPECT_TRUE(x == options.end()); // option 1 not found
+
+    x = options.find(2);
+    EXPECT_TRUE(x == options.end()); // option 2 not found
+}
+
+}
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
new file mode 100644
index 0000000..d4ecf80
--- /dev/null
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -0,0 +1,273 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <util/buffer.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+namespace {
+
+// a sample data (list of 4 addresses)
+const uint8_t sampledata[] = {
+    192, 0, 2, 3,     // 192.0.2.3
+    255, 255, 255, 0, // 255.255.255.0 - popular netmask
+    0, 0, 0 , 0,      // used for default routes or (any address)
+    127, 0, 0, 1      // loopback
+};
+
+// expected on-wire format for an option with 1 address
+const uint8_t expected1[] = { // 1 address
+    DHO_DOMAIN_NAME_SERVERS, 4, // type, length
+    192, 0, 2, 3,     // 192.0.2.3
+};
+
+// expected on-wire format for an option with 4 addresses
+const uint8_t expected4[] = { // 4 addresses
+    254, 16,            // type = 254, len = 16
+    192, 0, 2, 3,       // 192.0.2.3
+    255, 255, 255, 0,   // 255.255.255.0 - popular netmask
+    0, 0, 0 ,0,         // used for default routes or (any address)
+    127, 0, 0, 1        // loopback
+};
+
+class Option4AddrLstTest : public ::testing::Test {
+protected:
+
+    Option4AddrLstTest():
+        vec_(vector<uint8_t>(300,0)) // 300 bytes long filled with 0s
+    {
+        sampleAddrs_.push_back(IOAddress("192.0.2.3"));
+        sampleAddrs_.push_back(IOAddress("255.255.255.0"));
+        sampleAddrs_.push_back(IOAddress("0.0.0.0"));
+        sampleAddrs_.push_back(IOAddress("127.0.0.1"));
+    }
+
+    vector<uint8_t> vec_;
+    Option4AddrLst::AddressContainer sampleAddrs_;
+
+};
+
+TEST_F(Option4AddrLstTest, parse1) {
+
+    memcpy(&vec_[0], sampledata, sizeof(sampledata));
+
+    // just one address
+    Option4AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS,
+                                  vec_.begin(),
+                                  vec_.begin()+4);
+        // use just first address (4 bytes), not the whole
+        // sampledata
+    );
+
+    EXPECT_EQ(Option::V4, opt1->getUniverse());
+
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt1->getType());
+    EXPECT_EQ(6, opt1->len()); // 2 (header) + 4 (1x IPv4 addr)
+
+    Option4AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    EXPECT_NO_THROW(
+        delete opt1;
+        opt1 = 0;
+    );
+
+    // 1 address
+}
+
+TEST_F(Option4AddrLstTest, parse4) {
+
+    vector<uint8_t> buffer(300,0); // 300 bytes long filled with 0s
+
+    memcpy(&buffer[0], sampledata, sizeof(sampledata));
+
+    // 4 addresses
+    Option4AddrLst* opt4 = 0;
+    EXPECT_NO_THROW(
+        opt4 = new Option4AddrLst(254,
+                                  buffer.begin(),
+                                  buffer.begin()+sizeof(sampledata));
+    );
+
+    EXPECT_EQ(Option::V4, opt4->getUniverse());
+
+    EXPECT_EQ(254, opt4->getType());
+    EXPECT_EQ(18, opt4->len()); // 2 (header) + 16 (4x IPv4 addrs)
+
+    Option4AddrLst::AddressContainer addrs = opt4->getAddresses();
+    ASSERT_EQ(4, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    EXPECT_NO_THROW(
+        delete opt4;
+        opt4 = 0;
+    );
+}
+
+TEST_F(Option4AddrLstTest, assembly1) {
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("192.0.2.3"));
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(6, opt->len());
+    ASSERT_EQ(6, buf.getLength());
+
+    EXPECT_EQ(0, memcmp(expected1, buf.getData(), 6));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is an old-fashioned (IPv4) option. We don't serve IPv6 types here!
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("2001:db8::1")),
+        BadValue
+    );
+    if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, assembly4) {
+
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(254, sampleAddrs_);
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(254, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
+    ASSERT_EQ(18, buf.getLength());
+
+    ASSERT_EQ(0, memcmp(expected4, buf.getData(), 18));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is an old-fashioned (IPv4) option. We don't serve IPv6 types here!
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, sampleAddrs_),
+        BadValue
+    );
+    if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, setAddress) {
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123, IOAddress("1.2.3.4"));
+    );
+    opt->setAddress(IOAddress("192.0.255.255"));
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.255.255", addrs[0].toText());
+
+    // Only IPv4 addresses are accepted; an IPv6 address must be rejected.
+    EXPECT_THROW(
+        opt->setAddress(IOAddress("2001:db8::1")),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+TEST_F(Option4AddrLstTest, setAddresses) {
+
+    Option4AddrLst* opt = 0;
+
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123); // empty list
+    );
+
+    opt->setAddresses(sampleAddrs_);
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    // Only IPv4 addresses are accepted; a list containing an IPv6 address must be rejected.
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt->setAddresses(sampleAddrs_),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option6_addrlst_unittest.cc b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
new file mode 100644
index 0000000..60b618b
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_addrlst_unittest.cc
@@ -0,0 +1,232 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp6.h>
+#include <dhcp/option.h>
+#include <dhcp/option6_addrlst.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace {
+class Option6AddrLstTest : public ::testing::Test {
+public:
+    Option6AddrLstTest() {
+    }
+};
+
+TEST_F(Option6AddrLstTest, basic) {
+
+    // Limiting tests to just the 2001:db8::/32 range is *wrong*.
+    // Good tests check corner cases as well.
+    // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff checks
+    // for integer overflow.
+    // ff02::face:b00c checks if multicast addresses
+    // can be represented properly.
+
+    uint8_t sampledata[] = {
+        // 2001:db8:1::dead:beef
+        0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+        0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+        // ff02::face:b00c
+        0xff, 02, 0, 0, 0, 0, 0 , 0,
+        0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+        // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+    };
+
+    uint8_t expected1[] = {
+        D6O_NAME_SERVERS/256, D6O_NAME_SERVERS%256,//type
+        0, 16, // len = 16 (1 address)
+        0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+        0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+    };
+
+    uint8_t expected2[] = {
+        D6O_SIP_SERVERS_ADDR/256, D6O_SIP_SERVERS_ADDR%256,
+        0, 32, // len = 32 (2 addresses)
+        // 2001:db8:1::dead:beef
+        0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+        0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+        // ff02::face:b00c
+        0xff, 02, 0, 0, 0, 0, 0 , 0,
+        0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+        // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+    };
+
+    uint8_t expected3[] = {
+        D6O_NIS_SERVERS/256, D6O_NIS_SERVERS%256,
+        0, 48,
+        // 2001:db8:1::dead:beef
+        0x20, 0x01, 0x0d, 0xb8, 0x00, 0x01, 0, 0,
+        0, 0, 0, 0, 0xde, 0xad, 0xbe, 0xef,
+
+        // ff02::face:b00c
+        0xff, 02, 0, 0, 0, 0, 0 , 0,
+        0, 0, 0, 0, 0xfa, 0xce, 0xb0, 0x0c,
+
+        // ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+    };
+
+    boost::shared_array<uint8_t> buf(new uint8_t[300]);
+    for (int i = 0; i < 300; i++)
+        buf[i] = 0;
+
+    memcpy(&buf[0], sampledata, 48);
+
+    // just a single address
+    Option6AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option6AddrLst(D6O_NAME_SERVERS, buf, 128, 0, 16);
+    );
+
+    EXPECT_EQ(Option::V6, opt1->getUniverse());
+
+    EXPECT_EQ(D6O_NAME_SERVERS, opt1->getType());
+    EXPECT_EQ(20, opt1->len());
+    Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+    IOAddress addr = addrs[0];
+    EXPECT_EQ("2001:db8:1::dead:beef", addr.toText());
+
+    // pack this option again in the same buffer, but in
+    // different place
+    int offset = opt1->pack(buf,300, 100);
+
+    EXPECT_EQ(120, offset);
+    EXPECT_EQ( 0, memcmp(expected1, &buf[100], 20) );
+
+    // two addresses
+    Option6AddrLst* opt2 = 0;
+    EXPECT_NO_THROW(
+        opt2 = new Option6AddrLst(D6O_SIP_SERVERS_ADDR, buf, 128, 0, 32);
+    );
+    EXPECT_EQ(D6O_SIP_SERVERS_ADDR, opt2->getType());
+    EXPECT_EQ(36, opt2->len());
+    addrs = opt2->getAddresses();
+    ASSERT_EQ(2, addrs.size());
+    EXPECT_EQ("2001:db8:1::dead:beef", addrs[0].toText());
+    EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
+
+    // pack this option again in the same buffer, but in
+    // different place
+    offset = opt2->pack(buf,300, 150);
+
+    EXPECT_EQ(150+36, offset);
+    EXPECT_EQ( 0, memcmp(expected2, &buf[150], 36));
+
+    // three addresses
+    Option6AddrLst* opt3 = 0;
+    EXPECT_NO_THROW(
+        opt3 = new Option6AddrLst(D6O_NIS_SERVERS, buf, 128, 0, 48);
+    );
+
+    EXPECT_EQ(D6O_NIS_SERVERS, opt3->getType());
+    EXPECT_EQ(52, opt3->len());
+    addrs = opt3->getAddresses();
+    ASSERT_EQ(3, addrs.size());
+    EXPECT_EQ("2001:db8:1::dead:beef", addrs[0].toText());
+    EXPECT_EQ("ff02::face:b00c", addrs[1].toText());
+    EXPECT_EQ("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", addrs[2].toText());
+
+    // pack this option again in the same buffer, but in
+    // different place
+    offset = opt3->pack(buf,300, 200);
+
+    EXPECT_EQ(252, offset);
+    EXPECT_EQ( 0, memcmp(expected3, &buf[200], 52) );
+
+    EXPECT_NO_THROW(
+        delete opt1;
+        delete opt2;
+        delete opt3;
+    );
+}
+
+TEST_F(Option6AddrLstTest, constructors) {
+
+    Option6AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option6AddrLst(1234, IOAddress("::1"));
+    );
+    EXPECT_EQ(Option::V6, opt1->getUniverse());
+    EXPECT_EQ(1234, opt1->getType());
+
+    Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("::1", addrs[0].toText());
+
+    addrs.clear();
+    addrs.push_back(IOAddress(string("fe80::1234")));
+    addrs.push_back(IOAddress(string("2001:db8:1::baca")));
+
+    Option6AddrLst* opt2 = 0;
+    EXPECT_NO_THROW(
+        opt2 = new Option6AddrLst(5678, addrs);
+    );
+
+    Option6AddrLst::AddressContainer check = opt2->getAddresses();
+    ASSERT_EQ(2, check.size() );
+    EXPECT_EQ("fe80::1234", check[0].toText());
+    EXPECT_EQ("2001:db8:1::baca", check[1].toText());
+
+    EXPECT_NO_THROW(
+        delete opt1;
+        delete opt2;
+    );
+}
+
+TEST_F(Option6AddrLstTest, setAddress) {
+    Option6AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option6AddrLst(1234, IOAddress("::1"));
+    );
+    opt1->setAddress(IOAddress("2001:db8:1::2"));
+    /// TODO It used to be ::2 address, but io_address represents
+    /// it as ::0.0.0.2. Purpose of this test is to verify
+    /// that setAddress() works, not deal with subtleties of
+    /// io_address handling of IPv4-mapped IPv6 addresses, we
+    /// switched to a more common address. User interested
+    /// in pursuing this matter further is encouraged to look
+    /// at section 2.5.5 of RFC4291 (and possibly implement
+    /// a test for IOAddress)
+
+    Option6AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("2001:db8:1::2", addrs[0].toText());
+
+    EXPECT_NO_THROW(
+        delete opt1;
+    );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option6_ia_unittest.cc b/src/lib/dhcp/tests/option6_ia_unittest.cc
new file mode 100644
index 0000000..3fd52f5
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_ia_unittest.cc
@@ -0,0 +1,266 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <boost/shared_array.hpp>
+#include <boost/shared_ptr.hpp>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+#include "dhcp/option6_ia.h"
+#include "dhcp/option6_iaaddr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+
+namespace {
+class Option6IATest : public ::testing::Test {
+public:
+    Option6IATest() {
+    }
+};
+
+TEST_F(Option6IATest, basic) {
+
+    boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+    for (int i = 0; i < 128; i++)
+        simple_buf[i] = 0;
+    simple_buf[0] = 0xa1; // iaid
+    simple_buf[1] = 0xa2;
+    simple_buf[2] = 0xa3;
+    simple_buf[3] = 0xa4;
+
+    simple_buf[4] = 0x81; // T1
+    simple_buf[5] = 0x02;
+    simple_buf[6] = 0x03;
+    simple_buf[7] = 0x04;
+
+    simple_buf[8] = 0x84; // T2
+    simple_buf[9] = 0x03;
+    simple_buf[10] = 0x02;
+    simple_buf[11] = 0x01;
+
+    // create an option
+    // unpack() is called from constructor
+    Option6IA* opt = new Option6IA(D6O_IA_NA,
+                                   simple_buf,
+                                   128,
+                                   0,
+                                   12);
+
+    EXPECT_EQ(Option::V6, opt->getUniverse());
+    EXPECT_EQ(D6O_IA_NA, opt->getType());
+    EXPECT_EQ(0xa1a2a3a4, opt->getIAID());
+    EXPECT_EQ(0x81020304, opt->getT1());
+    EXPECT_EQ(0x84030201, opt->getT2());
+
+    // pack this option again in the same buffer, but in
+    // different place
+
+    // test for pack()
+    int offset = opt->pack(simple_buf, 128, 60);
+
+    // len() = 4-byte header + 12 bytes of content
+    EXPECT_EQ(12, opt->len() - 4);
+    EXPECT_EQ(D6O_IA_NA, opt->getType());
+
+    EXPECT_EQ(offset, 76); // 60 + length(IA_NA) = 76
+
+    // check if pack worked properly:
+    // if option type is correct
+    EXPECT_EQ(D6O_IA_NA, simple_buf[60]*256 + simple_buf[61]);
+
+    // if option length is correct
+    EXPECT_EQ(12, simple_buf[62]*256 + simple_buf[63]);
+
+    // if iaid is correct
+    unsigned int iaid = htonl(*(unsigned int*)&simple_buf[64]);
+    EXPECT_EQ(0xa1a2a3a4, iaid );
+
+    // if T1 is correct
+    EXPECT_EQ(0x81020304, (simple_buf[68] << 24) +
+                          (simple_buf[69] << 16) +
+                          (simple_buf[70] << 8) +
+                          (simple_buf[71]) );
+
+    // if T2 is correct
+    EXPECT_EQ(0x84030201, (simple_buf[72] << 24) +
+                          (simple_buf[73] << 16) +
+                          (simple_buf[74] << 8) +
+                          (simple_buf[75]) );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+TEST_F(Option6IATest, simple) {
+    boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+    for (int i = 0; i < 128; i++)
+        simple_buf[i] = 0;
+
+    Option6IA * ia = new Option6IA(D6O_IA_NA, 1234);
+    ia->setT1(2345);
+    ia->setT2(3456);
+
+    EXPECT_EQ(Option::V6, ia->getUniverse());
+    EXPECT_EQ(D6O_IA_NA, ia->getType());
+    EXPECT_EQ(1234, ia->getIAID());
+    EXPECT_EQ(2345, ia->getT1());
+    EXPECT_EQ(3456, ia->getT2());
+
+    EXPECT_NO_THROW(
+        delete ia;
+    );
+}
+
+// test if option can build suboptions
+TEST_F(Option6IATest, suboptions_pack) {
+    boost::shared_array<uint8_t> buf(new uint8_t[128]);
+    for (int i=0; i<128; i++)
+        buf[i] = 0;
+    buf[0] = 0xff;
+    buf[1] = 0xfe;
+    buf[2] = 0xfc;
+
+    Option6IA * ia = new Option6IA(D6O_IA_NA, 0x13579ace);
+    ia->setT1(0x2345);
+    ia->setT2(0x3456);
+
+    boost::shared_ptr<Option> sub1(new Option(Option::V6,
+                                              0xcafe));
+
+    boost::shared_ptr<Option6IAAddr> addr1(
+        new Option6IAAddr(D6O_IAADDR, IOAddress("2001:db8:1234:5678::abcd"),
+                          0x5000, 0x7000));
+
+    ia->addOption(sub1);
+    ia->addOption(addr1);
+
+    ASSERT_EQ(28, addr1->len());
+    ASSERT_EQ(4, sub1->len());
+    ASSERT_EQ(48, ia->len());
+
+    uint8_t expected[] = {
+        D6O_IA_NA/256, D6O_IA_NA%256, // type
+        0, 44, // length
+        0x13, 0x57, 0x9a, 0xce, // iaid
+        0, 0, 0x23, 0x45,  // T1
+        0, 0, 0x34, 0x56,  // T2
+
+        // iaaddr suboption
+        D6O_IAADDR/256, D6O_IAADDR%256, // type
+        0, 24, // len
+        0x20, 0x01, 0xd, 0xb8, 0x12,0x34, 0x56, 0x78,
+        0, 0, 0, 0, 0, 0, 0xab, 0xcd, // IP address
+        0, 0, 0x50, 0, // preferred-lifetime
+        0, 0, 0x70, 0, // valid-lifetime
+
+        // suboption
+        0xca, 0xfe, // type
+        0, 0 // len
+    };
+
+    int offset = ia->pack(buf, 128, 10);
+    ASSERT_EQ(offset, 10 + 48);
+
+    EXPECT_EQ(0, memcmp(&buf[10], expected, 48));
+
+    EXPECT_NO_THROW(
+        delete ia;
+    );
+}
+
+// test if option can parse suboptions
+TEST_F(Option6IATest, suboptions_unpack) {
+
+
+    uint8_t expected[] = {
+        D6O_IA_NA/256, D6O_IA_NA%256, // type
+        0, 28, // length
+        0x13, 0x57, 0x9a, 0xce, // iaid
+        0, 0, 0x23, 0x45,  // T1
+        0, 0, 0x34, 0x56,  // T2
+
+        // iaaddr suboption
+        D6O_IAADDR/256, D6O_IAADDR%256, // type
+        0, 24, // len
+        0x20, 0x01, 0xd, 0xb8, 0x12,0x34, 0x56, 0x78,
+        0, 0, 0, 0, 0, 0, 0xab, 0xcd, // IP address
+        0, 0, 0x50, 0, // preferred-lifetime
+        0, 0, 0x70, 0, // valid-lifetime
+
+        // suboption
+        0xca, 0xfe, // type
+        0, 0 // len
+    };
+
+    boost::shared_array<uint8_t> buf(new uint8_t[128]);
+    for (int i = 0; i < 128; i++)
+        buf[i] = 0;
+    memcpy(&buf[0], expected, 48);
+
+    Option6IA* ia = 0;
+    EXPECT_NO_THROW({
+        ia = new Option6IA(D6O_IA_NA, buf, 128, 4, 44);
+
+        // let's limit verbosity of this test
+        // cout << "Parsed option:" << endl << ia->toText() << endl;
+    });
+    ASSERT_TRUE(ia);
+
+    EXPECT_EQ(D6O_IA_NA, ia->getType());
+    EXPECT_EQ(0x13579ace, ia->getIAID());
+    EXPECT_EQ(0x2345, ia->getT1());
+    EXPECT_EQ(0x3456, ia->getT2());
+
+    boost::shared_ptr<Option> subopt = ia->getOption(D6O_IAADDR);
+    ASSERT_NE(boost::shared_ptr<Option>(), subopt); // non-NULL
+
+    // checks for address option
+    Option6IAAddr * addr = dynamic_cast<Option6IAAddr*>(subopt.get());
+    ASSERT_TRUE(NULL != addr);
+
+    EXPECT_EQ(D6O_IAADDR, addr->getType());
+    EXPECT_EQ(28, addr->len());
+    EXPECT_EQ(0x5000, addr->getPreferred());
+    EXPECT_EQ(0x7000, addr->getValid());
+    EXPECT_EQ("2001:db8:1234:5678::abcd", addr->getAddress().toText());
+
+    // checks for dummy option
+    subopt = ia->getOption(0xcafe);
+    ASSERT_TRUE(subopt); // should be non-NULL
+
+    EXPECT_EQ(0xcafe, subopt->getType());
+    EXPECT_EQ(4, subopt->len());
+    // there should be no data at all
+    EXPECT_EQ(0, subopt->getData().size());
+
+    subopt = ia->getOption(1); // get option 1
+    ASSERT_FALSE(subopt); // should be NULL
+
+    EXPECT_NO_THROW(
+        delete ia;
+    );
+}
+
+}
diff --git a/src/lib/dhcp/tests/option6_iaaddr_unittest.cc b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
new file mode 100644
index 0000000..81c3eb3
--- /dev/null
+++ b/src/lib/dhcp/tests/option6_iaaddr_unittest.cc
@@ -0,0 +1,105 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+#include "dhcp/option6_iaaddr.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+
+namespace {
+class Option6IAAddrTest : public ::testing::Test {
+public:
+    Option6IAAddrTest() {
+    }
+};
+
+/// TODO reenable this once ticket #1313 is implemented.
+TEST_F(Option6IAAddrTest, basic) {
+
+    boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+    for (int i = 0; i < 128; i++)
+        simple_buf[i] = 0;
+
+    simple_buf[0] = 0x20;
+    simple_buf[1] = 0x01;
+    simple_buf[2] = 0x0d;
+    simple_buf[3] = 0xb8;
+    simple_buf[4] = 0x00;
+    simple_buf[5] = 0x01;
+    simple_buf[12] = 0xde;
+    simple_buf[13] = 0xad;
+    simple_buf[14] = 0xbe;
+    simple_buf[15] = 0xef; // 2001:db8:1::dead:beef
+
+    simple_buf[16] = 0x00;
+    simple_buf[17] = 0x00;
+    simple_buf[18] = 0x03;
+    simple_buf[19] = 0xe8; // 1000
+
+    simple_buf[20] = 0xb2;
+    simple_buf[21] = 0xd0;
+    simple_buf[22] = 0x5e;
+    simple_buf[23] = 0x00; // 3,000,000,000
+
+    // create an option (unpack content)
+    Option6IAAddr* opt = new Option6IAAddr(D6O_IAADDR,
+                                           simple_buf,
+                                           128,
+                                           0,
+                                           24);
+
+    // pack this option again in the same buffer, but in
+    // different place
+    int offset = opt->pack(simple_buf, 128, 50);
+
+    EXPECT_EQ(78, offset);
+
+    EXPECT_EQ(Option::V6, opt->getUniverse());
+
+    // check that address and lifetimes were parsed correctly
+    EXPECT_EQ("2001:db8:1::dead:beef", opt->getAddress().toText());
+    EXPECT_EQ(1000, opt->getPreferred());
+    EXPECT_EQ(3000000000U, opt->getValid());
+
+    EXPECT_EQ(D6O_IAADDR, opt->getType());
+
+    EXPECT_EQ(Option::OPTION6_HDR_LEN + Option6IAAddr::OPTION6_IAADDR_LEN,
+              opt->len());
+
+    // check if pack worked properly:
+    // if option type is correct
+    EXPECT_EQ(D6O_IAADDR, simple_buf[50]*256 + simple_buf[51]);
+
+    // if option length is correct
+    EXPECT_EQ(24, simple_buf[52]*256 + simple_buf[53]);
+
+    // if option content is correct
+    EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[54],24));
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+}
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
new file mode 100644
index 0000000..db3ee3b
--- /dev/null
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -0,0 +1,419 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <boost/shared_ptr.hpp>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+#include "dhcp/dhcp6.h"
+#include "dhcp/option.h"
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::util;
+
+namespace {
+class OptionTest : public ::testing::Test {
+public:
+    OptionTest() {
+    }
+};
+
+// v4 is not really implemented yet. A simple test will do for now
+TEST_F(OptionTest, v4_basic) {
+
+    Option* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option(Option::V4, 17);
+    );
+
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(17, opt->getType());
+    EXPECT_EQ(0, opt->getData().size());
+    EXPECT_EQ(2, opt->len()); // just v4 header
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+    opt = 0;
+
+    // V4 options have type 0...255
+    EXPECT_THROW(
+        opt = new Option(Option::V4, 256),
+        BadValue
+    );
+    if (opt) {
+        delete opt;
+        opt = 0;
+    }
+}
+
+const uint8_t dummyPayload[] =
+{ 1, 2, 3, 4};
+
+TEST_F(OptionTest, v4_data1) {
+
+    vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+    Option* opt = 0;
+
+    // create DHCPv4 option of type 123
+    // that contains 4 bytes of data
+    ASSERT_NO_THROW(
+        opt= new Option(Option::V4,
+                        123, // type
+                        data);
+    );
+
+    // check that content is reported properly
+    EXPECT_EQ(123, opt->getType());
+    vector<uint8_t> optData = opt->getData();
+    ASSERT_EQ(optData.size(), data.size());
+    EXPECT_TRUE(optData == data);
+    EXPECT_EQ(2, opt->getHeaderLen());
+    EXPECT_EQ(6, opt->len());
+
+    // now store that option into a buffer
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    // check content of that buffer
+
+    // 2 byte header + 4 bytes data
+    ASSERT_EQ(6, buf.getLength());
+
+    // that's how this option is supposed to look like
+    uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+    /// TODO: use vector<uint8_t> getData() when it will be implemented
+    EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+    // check that we can destroy that option
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+// this is almost the same test as v4_data1, but it uses
+// different constructor
+TEST_F(OptionTest, v4_data2) {
+
+    vector<uint8_t> data(dummyPayload, dummyPayload + sizeof(dummyPayload));
+
+    vector<uint8_t> expData = data;
+
+    // Add fake data in front and end. Main purpose of this test is to check
+    // that only subset of the whole vector can be used for creating option.
+    data.insert(data.begin(), 56);
+    data.push_back(67);
+
+    // Data contains extra garbage at beginning and at the end. It should be
+    // ignored, as we pass iterators to proper data. Only subset (limited by
+    // iterators) of the vector should be used.
+    // expData contains expected content (just valid data, without garbage).
+
+    Option* opt = 0;
+
+    // Create DHCPv4 option of type 123 that contains
+    // 4 bytes (sizeof(dummyPayload)) of data.
+    ASSERT_NO_THROW(
+        opt= new Option(Option::V4,
+                        123, // type
+                        data.begin() + 1,
+                        data.end() - 1);
+    );
+
+    // check that content is reported properly
+    EXPECT_EQ(123, opt->getType());
+    vector<uint8_t> optData = opt->getData();
+    ASSERT_EQ(optData.size(), expData.size());
+    EXPECT_TRUE(optData == expData);
+    EXPECT_EQ(2, opt->getHeaderLen());
+    EXPECT_EQ(6, opt->len());
+
+    // now store that option into a buffer
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    // check content of that buffer
+
+    // 2 byte header + 4 bytes data
+    ASSERT_EQ(6, buf.getLength());
+
+    // that's how this option is supposed to look like
+    uint8_t exp[] = { 123, 4, 1, 2, 3, 4 };
+
+    /// TODO: use vector<uint8_t> getData() once it is implemented
+    EXPECT_EQ(0, memcmp(exp, buf.getData(), 6));
+
+    // check that we can destroy that option
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+TEST_F(OptionTest, v4_toText) {
+
+    vector<uint8_t> buf(3);
+    buf[0] = 0;
+    buf[1] = 0xf;
+    buf[2] = 0xff;
+
+    Option opt(Option::V4, 253, buf);
+
+    EXPECT_EQ("type=253, len=3: 00:0f:ff", opt.toText());
+}
+
+// tests simple constructor
+TEST_F(OptionTest, v6_basic) {
+
+    Option* opt = new Option(Option::V6, 1);
+
+    EXPECT_EQ(Option::V6, opt->getUniverse());
+    EXPECT_EQ(1, opt->getType());
+    EXPECT_EQ(0, opt->getData().size());
+    EXPECT_EQ(4, opt->len()); // just v6 header
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+// tests constructor used in pkt reception
+// option contains actual data
+TEST_F(OptionTest, v6_data1) {
+    boost::shared_array<uint8_t> buf(new uint8_t[32]);
+    for (int i = 0; i < 32; i++)
+        buf[i] = 100+i;
+    Option* opt = new Option(Option::V6, 333, //type
+                             buf,
+                             3, // offset
+                             7); // 7 bytes of data
+    EXPECT_EQ(333, opt->getType());
+
+    ASSERT_EQ(11, opt->len());
+    ASSERT_EQ(7, opt->getData().size());
+    EXPECT_EQ(0, memcmp(&buf[3], &opt->getData()[0], 7) );
+
+    int offset = opt->pack(buf, 32, 20);
+    EXPECT_EQ(31, offset);
+
+    EXPECT_EQ(buf[20], 333/256); // type
+    EXPECT_EQ(buf[21], 333%256);
+
+    EXPECT_EQ(buf[22], 0); // len
+    EXPECT_EQ(buf[23], 7);
+
+    // payload
+    EXPECT_EQ(0, memcmp(&buf[3], &buf[24], 7) );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+// another test that checks the same thing, just
+// with different input parameters
+TEST_F(OptionTest, v6_data2) {
+
+    boost::shared_array<uint8_t> simple_buf(new uint8_t[128]);
+    for (int i = 0; i < 128; i++)
+        simple_buf[i] = 0;
+    simple_buf[0] = 0xa1;
+    simple_buf[1] = 0xa2;
+    simple_buf[2] = 0xa3;
+    simple_buf[3] = 0xa4;
+
+    // create an option (unpack content)
+    Option* opt = new Option(Option::V6,
+                             D6O_CLIENTID,
+                             simple_buf,
+                             0,
+                             4);
+
+    // pack this option again in the same buffer, but in
+    // different place
+    int offset18 = opt->pack(simple_buf, 128, 10);
+
+    // 4 bytes header + 4 bytes content
+    EXPECT_EQ(8, opt->len());
+    EXPECT_EQ(D6O_CLIENTID, opt->getType());
+
+    EXPECT_EQ(offset18, 18);
+
+    // check if pack worked properly:
+    // if option type is correct
+    EXPECT_EQ(D6O_CLIENTID, simple_buf[10]*256 + simple_buf[11]);
+
+    // if option length is correct
+    EXPECT_EQ(4, simple_buf[12]*256 + simple_buf[13]);
+
+    // if option content is correct
+    EXPECT_EQ(0, memcmp(&simple_buf[0], &simple_buf[14],4));
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+// check that an option can contain 2 suboptions:
+// opt1
+//  +----opt2
+//  |
+//  +----opt3
+//
+TEST_F(OptionTest, v6_suboptions1) {
+    boost::shared_array<uint8_t> buf(new uint8_t[128]);
+    for (int i=0; i<128; i++)
+        buf[i] = 100+i;
+    Option* opt1 = new Option(Option::V6, 65535, //type
+                              buf,
+                              0, // offset
+                              3); // 3 bytes of data
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
+                                              buf,
+                                              3, // offset
+                                              5)); // 5 bytes of data
+    opt1->addOption(opt2);
+    opt1->addOption(opt3);
+    // opt2 len = 4 (just header)
+    // opt3 len = 9 4(header)+5(data)
+    // opt1 len = 7 + suboptions() = 7 + 4 + 9 = 20
+
+    EXPECT_EQ(4, opt2->len());
+    EXPECT_EQ(9, opt3->len());
+    EXPECT_EQ(20, opt1->len());
+
+    uint8_t expected[] = {
+        0xff, 0xff, 0, 16, 100, 101, 102,
+        0, 7, 0, 5, 103, 104, 105, 106, 107,
+        0, 13, 0, 0 // no data at all
+    };
+
+    int offset = opt1->pack(buf, 128, 20);
+    EXPECT_EQ(40, offset);
+
+    // payload
+    EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+
+    EXPECT_NO_THROW(
+        delete opt1;
+    );
+}
+
+// check that an option can contain nested suboptions:
+// opt1
+//  +----opt2
+//        |
+//        +----opt3
+//
+TEST_F(OptionTest, v6_suboptions2) {
+    boost::shared_array<uint8_t> buf(new uint8_t[128]);
+    for (int i=0; i<128; i++)
+        buf[i] = 100+i;
+    Option* opt1 = new Option(Option::V6, 65535, //type
+                              buf,
+                              0, // offset
+                              3); // 3 bytes of data
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 13));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 7,
+                                              buf,
+                                              3, // offset
+                                              5)); // 5 bytes of data
+    opt1->addOption(opt2);
+    opt2->addOption(opt3);
+    // opt3 len = 9 4(header)+5(data)
+    // opt2 len = 4 (just header) + len(opt3)
+    // opt1 len = 7 + len(opt2)
+
+    uint8_t expected[] = {
+        0xff, 0xff, 0, 16, 100, 101, 102,
+        0, 13, 0, 9,
+        0, 7, 0, 5, 103, 104, 105, 106, 107,
+    };
+
+    int offset = opt1->pack(buf, 128, 20);
+    EXPECT_EQ(40, offset);
+
+    // payload
+    EXPECT_EQ(0, memcmp(&buf[20], expected, 20) );
+
+    EXPECT_NO_THROW(
+        delete opt1;
+    );
+}
+
+TEST_F(OptionTest, v6_addgetdel) {
+    boost::shared_array<uint8_t> buf(new uint8_t[128]);
+    for (int i=0; i<128; i++)
+        buf[i] = 100+i;
+    Option* parent = new Option(Option::V6, 65535); //type
+    boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+
+    parent->addOption(opt1);
+    parent->addOption(opt2);
+
+    // getOption() test
+    EXPECT_EQ(opt1, parent->getOption(1));
+    EXPECT_EQ(opt2, parent->getOption(2));
+
+    // expect NULL
+    EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+
+    // now there are 2 options of type 2
+    parent->addOption(opt3);
+
+    // let's delete one of them
+    EXPECT_EQ(true, parent->delOption(2));
+
+    // there still should be the other option 2
+    EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+
+    // let's delete the other option 2
+    EXPECT_EQ(true, parent->delOption(2));
+
+    // no more options with type=2
+    EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+
+    // let's try to delete - should fail
+    EXPECT_TRUE(false ==  parent->delOption(2));
+}
+
+}
+
+TEST_F(OptionTest, v6_toText) {
+    boost::shared_array<uint8_t> buf(new uint8_t[3]);
+    buf[0] = 0;
+    buf[1] = 0xf;
+    buf[2] = 0xff;
+
+    boost::shared_ptr<Option> opt(new Option(Option::V6, 258,
+                                             buf, 0, 3));
+
+    EXPECT_EQ("type=258, len=3: 00:0f:ff", opt->toText());
+}
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
new file mode 100644
index 0000000..0f70442
--- /dev/null
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -0,0 +1,564 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <boost/static_assert.hpp>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/dhcp4.h>
+#include <exceptions/exceptions.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace boost;
+
+namespace {
+
+TEST(Pkt4Test, constructor) {
+
+    ASSERT_EQ(236U, static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) );
+    Pkt4* pkt = 0;
+
+    // Just some dummy payload.
+    uint8_t testData[250];
+    for (int i = 0; i < 250; i++) {
+        testData[i]=i;
+    }
+
+    // Positive case1. Normal received packet.
+    EXPECT_NO_THROW(
+        pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN);
+    );
+
+    EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+    EXPECT_NO_THROW(
+        delete pkt;
+        pkt = 0;
+    );
+
+    // Positive case2. Normal outgoing packet.
+    EXPECT_NO_THROW(
+        pkt = new Pkt4(DHCPDISCOVER, 0xffffffff);
+    );
+
+    // DHCPv4 packet must be at least 236 bytes long
+    EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+    EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+    EXPECT_EQ(0xffffffff, pkt->getTransid());
+    EXPECT_NO_THROW(
+        delete pkt;
+        pkt = 0;
+    );
+
+    // Negative case. Should drop truncated messages.
+    EXPECT_THROW(
+        pkt = new Pkt4(testData, Pkt4::DHCPV4_PKT_HDR_LEN-1),
+        OutOfRange
+    );
+    if (pkt) {
+        // Test failed. Exception should have been thrown, but
+        // object was created instead. Let's clean this up.
+        delete pkt;
+        pkt = 0;
+    }
+}
+
+// a sample data
+const uint8_t dummyOp = BOOTREQUEST;
+const uint8_t dummyHtype = 6;
+const uint8_t dummyHlen = 6;
+const uint8_t dummyHops = 13;
+const uint32_t dummyTransid = 0x12345678;
+const uint16_t dummySecs = 42;
+const uint16_t dummyFlags = BOOTP_BROADCAST;
+
+const IOAddress dummyCiaddr("192.0.2.1");
+const IOAddress dummyYiaddr("1.2.3.4");
+const IOAddress dummySiaddr("192.0.2.255");
+const IOAddress dummyGiaddr("255.255.255.255");
+
+// a dummy MAC address
+const uint8_t dummyMacAddr[] = {0, 1, 2, 3, 4, 5};
+
+// a dummy MAC address, padded with 0s
+const uint8_t dummyChaddr[16] = {0, 1, 2, 3, 4, 5, 0, 0,
+                                 0, 0, 0, 0, 0, 0, 0, 0 };
+
+// let's use some creative test content here (128 chars + \0)
+const uint8_t dummyFile[] = "Lorem ipsum dolor sit amet, consectetur "
+    "adipiscing elit. Proin mollis placerat metus, at "
+    "lacinia orci ornare vitae. Mauris amet.";
+
+// yet another type of test content (64 chars + \0)
+const uint8_t dummySname[] = "Lorem ipsum dolor sit amet, consectetur "
+    "adipiscing elit posuere.";
+
+BOOST_STATIC_ASSERT(sizeof(dummyFile)  == Pkt4::MAX_FILE_LEN + 1);
+BOOST_STATIC_ASSERT(sizeof(dummySname) == Pkt4::MAX_SNAME_LEN + 1);
+
+/// @brief Generates test packet.
+///
+/// Allocates and generates test packet, with all fixed
+/// fields set to non-zero values. Content is not always
+/// reasonable.
+///
+/// See generateTestPacket2() function that returns
+/// exactly the same packet in on-wire format.
+///
+/// @return pointer to allocated Pkt4 object.
+boost::shared_ptr<Pkt4>
+generateTestPacket1() {
+
+    boost::shared_ptr<Pkt4> pkt(new Pkt4(DHCPDISCOVER, dummyTransid));
+
+    vector<uint8_t> vectorMacAddr(dummyMacAddr, dummyMacAddr
+                                  +sizeof(dummyMacAddr));
+
+    // hwType = 6(ETHERNET), hlen = 6(MAC address len)
+    pkt->setHWAddr(dummyHtype, dummyHlen, vectorMacAddr);
+    pkt->setHops(dummyHops); // 13 relays. Wow!
+    // Transaction-id is already set.
+    pkt->setSecs(dummySecs);
+    pkt->setFlags(dummyFlags); // all flags set
+    pkt->setCiaddr(dummyCiaddr);
+    pkt->setYiaddr(dummyYiaddr);
+    pkt->setSiaddr(dummySiaddr);
+    pkt->setGiaddr(dummyGiaddr);
+    // Chaddr already set with setHWAddr().
+    pkt->setSname(dummySname, 64);
+    pkt->setFile(dummyFile, 128);
+
+    return (pkt);
+}
+
+/// @brief Generates test packet.
+///
+/// Allocates and generates on-wire buffer that represents
+/// test packet, with all fixed fields set to non-zero values.
+/// Content is not always reasonable.
+///
+/// See generateTestPacket1() function that returns
+/// exactly the same packet as Pkt4 object.
+///
+/// @return vector containing the on-wire form of the DHCPv4
+/// packet header.
+vector<uint8_t>
+generateTestPacket2() {
+
+    // That is only part of the header. It contains all "short" fields,
+    // larger fields are constructed separately.
+    uint8_t hdr[] = {
+        1, 6, 6, 13,            // op, htype, hlen, hops,
+        0x12, 0x34, 0x56, 0x78, // transaction-id
+        0, 42, 0x80, 0x00,      // 42 secs, BROADCAST flags
+        192, 0, 2, 1,           // ciaddr
+        1, 2, 3, 4,             // yiaddr
+        192, 0, 2, 255,         // siaddr
+        255, 255, 255, 255,     // giaddr
+    };
+
+    // Initialize the vector with the header fields defined above.
+    vector<uint8_t> buf(hdr, hdr + sizeof(hdr));
+
+    // Append the large header fields.
+    copy(dummyChaddr, dummyChaddr + Pkt4::MAX_CHADDR_LEN, back_inserter(buf));
+    copy(dummySname, dummySname + Pkt4::MAX_SNAME_LEN, back_inserter(buf));
+    copy(dummyFile, dummyFile + Pkt4::MAX_FILE_LEN, back_inserter(buf));
+
+    // Should now have all the header, so check.  The "static_cast" is used
+    // to get round an odd bug whereby the linker appears not to find the
+    // definition of DHCPV4_PKT_HDR_LEN if it appears within an EXPECT_EQ().
+    EXPECT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), buf.size());
+
+    return (buf);
+}
+
+TEST(Pkt4Test, fixedFields) {
+
+    shared_ptr<Pkt4> pkt = generateTestPacket1();
+
+    // ok, let's check packet values
+    EXPECT_EQ(dummyOp, pkt->getOp());
+    EXPECT_EQ(dummyHtype, pkt->getHtype());
+    EXPECT_EQ(dummyHlen, pkt->getHlen());
+    EXPECT_EQ(dummyHops, pkt->getHops());
+    EXPECT_EQ(dummyTransid, pkt->getTransid());
+    EXPECT_EQ(dummySecs, pkt->getSecs());
+    EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+    EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+    EXPECT_EQ(dummyYiaddr.toText(), pkt->getYiaddr().toText());
+    EXPECT_EQ(dummySiaddr.toText(), pkt->getSiaddr().toText());
+    EXPECT_EQ(dummyGiaddr.toText(), pkt->getGiaddr().toText());
+
+    // chaddr is always 16 bytes long and contains link-layer addr (MAC)
+    EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), 16));
+
+    EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], 64));
+
+    EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], 128));
+
+    EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+}
+
+TEST(Pkt4Test, fixedFieldsPack) {
+    shared_ptr<Pkt4> pkt = generateTestPacket1();
+    vector<uint8_t> expectedFormat = generateTestPacket2();
+
+    EXPECT_NO_THROW(
+        pkt->pack();
+    );
+
+    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN), pkt->len());
+
+    // redundant but MUCH easier for debug in gdb
+    const uint8_t* exp = &expectedFormat[0];
+    const uint8_t* got = static_cast<const uint8_t*>(pkt->getBuffer().getData());
+
+    EXPECT_EQ(0, memcmp(exp, got, Pkt4::DHCPV4_PKT_HDR_LEN));
+}
+
+/// TODO Uncomment when ticket #1226 is implemented
+TEST(Pkt4Test, fixedFieldsUnpack) {
+    vector<uint8_t> expectedFormat = generateTestPacket2();
+
+    shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+                                  Pkt4::DHCPV4_PKT_HDR_LEN));
+
+    EXPECT_NO_THROW(
+        pkt->unpack()
+    );
+
+    // ok, let's check packet values
+    EXPECT_EQ(dummyOp, pkt->getOp());
+    EXPECT_EQ(dummyHtype, pkt->getHtype());
+    EXPECT_EQ(dummyHlen, pkt->getHlen());
+    EXPECT_EQ(dummyHops, pkt->getHops());
+    EXPECT_EQ(dummyTransid, pkt->getTransid());
+    EXPECT_EQ(dummySecs, pkt->getSecs());
+    EXPECT_EQ(dummyFlags, pkt->getFlags());
+
+    EXPECT_EQ(dummyCiaddr.toText(), pkt->getCiaddr().toText());
+    EXPECT_EQ(string("1.2.3.4"), pkt->getYiaddr().toText());
+    EXPECT_EQ(string("192.0.2.255"), pkt->getSiaddr().toText());
+    EXPECT_EQ(string("255.255.255.255"), pkt->getGiaddr().toText());
+
+    // chaddr is always 16 bytes long and contains link-layer addr (MAC)
+    EXPECT_EQ(0, memcmp(dummyChaddr, pkt->getChaddr(), Pkt4::MAX_CHADDR_LEN));
+
+    ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_SNAME_LEN), pkt->getSname().size());
+    EXPECT_EQ(0, memcmp(dummySname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
+
+    ASSERT_EQ(static_cast<size_t>(Pkt4::MAX_FILE_LEN), pkt->getFile().size());
+    EXPECT_EQ(0, memcmp(dummyFile, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
+
+    EXPECT_EQ(DHCPDISCOVER, pkt->getType());
+}
+
+// this test is for hardware addresses (htype, hlen and chaddr fields)
+TEST(Pkt4Test, hwAddr) {
+
+    vector<uint8_t> mac;
+    uint8_t expectedChaddr[Pkt4::MAX_CHADDR_LEN];
+
+    // We resize vector to specified length. It is more natural for fixed-length
+    // field, than clear it (shrink size to 0) and push_back each element
+    // (growing length back to MAX_CHADDR_LEN).
+    mac.resize(Pkt4::MAX_CHADDR_LEN);
+
+    Pkt4* pkt = 0;
+    // let's test each hlen, from 0 till 16
+    for (int macLen = 0; macLen < Pkt4::MAX_CHADDR_LEN; macLen++) {
+        for (int i = 0; i < Pkt4::MAX_CHADDR_LEN; i++) {
+            mac[i] = 0;
+            expectedChaddr[i] = 0;
+        }
+        for (int i = 0; i < macLen; i++) {
+            mac[i] = 128 + i;
+            expectedChaddr[i] = 128 + i;
+        }
+
+        // type and transaction doesn't matter in this test
+        pkt = new Pkt4(DHCPOFFER, 1234);
+        pkt->setHWAddr(255-macLen*10, // just weird htype
+                       macLen,
+                       mac);
+        EXPECT_EQ(0, memcmp(expectedChaddr, pkt->getChaddr(),
+                            Pkt4::MAX_CHADDR_LEN));
+
+        EXPECT_NO_THROW(
+            pkt->pack();
+        );
+
+        // CHADDR starts at offset 28 in DHCP packet
+        const uint8_t* ptr =
+            static_cast<const uint8_t*>(pkt->getBuffer().getData())+28;
+
+        EXPECT_EQ(0, memcmp(ptr, expectedChaddr, Pkt4::MAX_CHADDR_LEN));
+
+        delete pkt;
+    }
+
+    /// TODO: extend this test once options support is implemented. HW address
+    /// longer than 16 bytes should be stored in client-identifier option
+}
+
+TEST(Pkt4Test, msgTypes) {
+
+    struct msgType {
+        uint8_t dhcp;
+        uint8_t bootp;
+    };
+
+    msgType types[] = {
+        {DHCPDISCOVER, BOOTREQUEST},
+        {DHCPOFFER, BOOTREPLY},
+        {DHCPREQUEST, BOOTREQUEST},
+        {DHCPDECLINE, BOOTREQUEST},
+        {DHCPACK, BOOTREPLY},
+        {DHCPNAK, BOOTREPLY},
+        {DHCPRELEASE, BOOTREQUEST},
+        {DHCPINFORM, BOOTREQUEST},
+        {DHCPLEASEQUERY, BOOTREQUEST},
+        {DHCPLEASEUNASSIGNED, BOOTREPLY},
+        {DHCPLEASEUNKNOWN, BOOTREPLY},
+        {DHCPLEASEACTIVE, BOOTREPLY}
+    };
+
+    Pkt4* pkt = 0;
+    for (int i = 0; i < sizeof(types) / sizeof(msgType); i++) {
+
+        pkt = new Pkt4(types[i].dhcp, 0);
+        EXPECT_EQ(types[i].dhcp, pkt->getType());
+
+        EXPECT_EQ(types[i].bootp, pkt->getOp());
+
+        delete pkt;
+        pkt = 0;
+    }
+
+    EXPECT_THROW(
+        pkt = new Pkt4(100, 0), // there's no message type 100
+        OutOfRange
+    );
+    if (pkt) {
+        delete pkt;
+    }
+}
+
+// this test verifies handling of sname field
+TEST(Pkt4Test, sname) {
+
+    uint8_t sname[Pkt4::MAX_SNAME_LEN];
+
+    Pkt4* pkt = 0;
+    // let's test each sname length, from 0 till 64
+    for (int snameLen=0; snameLen < Pkt4::MAX_SNAME_LEN; snameLen++) {
+        for (int i = 0; i < Pkt4::MAX_SNAME_LEN; i++) {
+            sname[i] = 0;
+        }
+        for (int i = 0; i < snameLen; i++) {
+            sname[i] = i;
+        }
+
+        // type and transaction doesn't matter in this test
+        pkt = new Pkt4(DHCPOFFER, 1234);
+        pkt->setSname(sname, snameLen);
+
+        EXPECT_EQ(0, memcmp(sname, &pkt->getSname()[0], Pkt4::MAX_SNAME_LEN));
+
+        EXPECT_NO_THROW(
+            pkt->pack();
+        );
+
+        // SNAME starts at offset 44 in DHCP packet
+        const uint8_t* ptr =
+            static_cast<const uint8_t*>(pkt->getBuffer().getData())+44;
+        EXPECT_EQ(0, memcmp(ptr, sname, Pkt4::MAX_SNAME_LEN));
+
+        delete pkt;
+    }
+}
+
+TEST(Pkt4Test, file) {
+
+    uint8_t file[Pkt4::MAX_FILE_LEN];
+
+    Pkt4* pkt = 0;
+    // Let's test each file length, from 0 till 128.
+    for (int fileLen = 0; fileLen < Pkt4::MAX_FILE_LEN; fileLen++) {
+        for (int i = 0; i < Pkt4::MAX_FILE_LEN; i++) {
+            file[i] = 0;
+        }
+        for (int i = 0; i < fileLen; i++) {
+            file[i] = i;
+        }
+
+        // Type and transaction doesn't matter in this test.
+        pkt = new Pkt4(DHCPOFFER, 1234);
+        pkt->setFile(file, fileLen);
+
+        EXPECT_EQ(0, memcmp(file, &pkt->getFile()[0], Pkt4::MAX_FILE_LEN));
+
+        //
+        EXPECT_NO_THROW(
+            pkt->pack();
+        );
+
+        // FILE starts at offset 108 in DHCP packet.
+        const uint8_t* ptr =
+            static_cast<const uint8_t*>(pkt->getBuffer().getData())+108;
+        EXPECT_EQ(0, memcmp(ptr, file, Pkt4::MAX_FILE_LEN));
+
+        delete pkt;
+    }
+
+}
+
+static uint8_t v4Opts[] = {
+    12,  3, 0,   1,  2,
+    13,  3, 10, 11, 12,
+    14,  3, 20, 21, 22,
+    128, 3, 30, 31, 32,
+    254, 3, 40, 41, 42
+};
+
+TEST(Pkt4Test, options) {
+    Pkt4* pkt = new Pkt4(DHCPOFFER, 0);
+
+    vector<uint8_t> payload[5];
+    for (int i = 0; i < 5; i++) {
+        payload[i].push_back(i*10);
+        payload[i].push_back(i*10+1);
+        payload[i].push_back(i*10+2);
+    }
+
+    boost::shared_ptr<Option> opt1(new Option(Option::V4, 12, payload[0]));
+    boost::shared_ptr<Option> opt2(new Option(Option::V4, 13, payload[1]));
+    boost::shared_ptr<Option> opt3(new Option(Option::V4, 14, payload[2]));
+    boost::shared_ptr<Option> opt5(new Option(Option::V4,128, payload[3]));
+    boost::shared_ptr<Option> opt4(new Option(Option::V4,254, payload[4]));
+
+    pkt->addOption(opt1);
+    pkt->addOption(opt2);
+    pkt->addOption(opt3);
+    pkt->addOption(opt4);
+    pkt->addOption(opt5);
+
+    EXPECT_TRUE(pkt->getOption(12));
+    EXPECT_TRUE(pkt->getOption(13));
+    EXPECT_TRUE(pkt->getOption(14));
+    EXPECT_TRUE(pkt->getOption(128));
+    EXPECT_TRUE(pkt->getOption(254));
+    EXPECT_FALSE(pkt->getOption(127)); //  no such option
+
+    // options are unique in DHCPv4. It should not be possible
+    // to add more than one option of the same type.
+    EXPECT_THROW(
+        pkt->addOption(opt1),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        pkt->pack();
+    );
+
+    const OutputBuffer& buf = pkt->getBuffer();
+    // check that all options are stored, they should take sizeof(v4Opts)
+    // there also should be OPTION_END added (just one byte)
+    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts) + 1,
+              buf.getLength());
+
+    // check that this extra data actually contains our options
+    const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
+    ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
+    EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+    EXPECT_EQ(DHO_END, static_cast<uint8_t>(*(ptr + sizeof(v4Opts))));
+
+    EXPECT_NO_THROW(
+        delete pkt;
+    );
+}
+
+TEST(Pkt4Test, unpackOptions) {
+
+    vector<uint8_t> expectedFormat = generateTestPacket2();
+
+    for (int i=0; i < sizeof(v4Opts); i++) {
+        expectedFormat.push_back(v4Opts[i]);
+    }
+
+    // now expectedFormat contains fixed format and 5 options
+
+    shared_ptr<Pkt4> pkt(new Pkt4(&expectedFormat[0],
+                                  expectedFormat.size()));
+
+    EXPECT_NO_THROW(
+        pkt->unpack()
+    );
+
+    EXPECT_TRUE(pkt->getOption(12));
+    EXPECT_TRUE(pkt->getOption(13));
+    EXPECT_TRUE(pkt->getOption(14));
+    EXPECT_TRUE(pkt->getOption(128));
+    EXPECT_TRUE(pkt->getOption(254));
+
+    shared_ptr<Option> x = pkt->getOption(12);
+    ASSERT_TRUE(x); // option 1 should exist
+    EXPECT_EQ(12, x->getType());  // this should be option 12
+    ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+2, 3)); // data len=3
+
+    x = pkt->getOption(13);
+    ASSERT_TRUE(x); // option 13 should exist
+    EXPECT_EQ(13, x->getType());  // this should be option 13
+    ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+7, 3)); // data len=3
+
+    x = pkt->getOption(14);
+    ASSERT_TRUE(x); // option 14 should exist
+    EXPECT_EQ(14, x->getType());  // this should be option 14
+    ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+12, 3)); // data len=3
+
+    x = pkt->getOption(128);
+    ASSERT_TRUE(x); // option 128 should exist
+    EXPECT_EQ(128, x->getType());  // this should be option 128
+    ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+17, 3)); // data len=3
+
+    x = pkt->getOption(254);
+    ASSERT_TRUE(x); // option 254 should exist
+    EXPECT_EQ(254, x->getType());  // this should be option 254
+    ASSERT_EQ(3, x->getData().size()); // it should be of length 3
+    EXPECT_EQ(5, x->len()); // total option length 5
+    EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
+}
+
+} // end of anonymous namespace
diff --git a/src/lib/dhcp/tests/pkt6_unittest.cc b/src/lib/dhcp/tests/pkt6_unittest.cc
new file mode 100644
index 0000000..968b24c
--- /dev/null
+++ b/src/lib/dhcp/tests/pkt6_unittest.cc
@@ -0,0 +1,207 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/dhcp6.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::asiolink;
+using namespace isc::dhcp;
+
+namespace {
+// empty class for now, but may be extended once Addr6 becomes bigger
+class Pkt6Test : public ::testing::Test {
+public:
+    Pkt6Test() {
+    }
+};
+
+TEST_F(Pkt6Test, constructor) {
+    Pkt6 * pkt1 = new Pkt6(17);
+
+    EXPECT_EQ(pkt1->data_len_, 17);
+
+    delete pkt1;
+}
+
+// captured actual SOLICIT packet: transid=0x3d79fb
+// options: client-id, in_na, dns-server, elapsed-time, option-request
+// this code is autogenerated (see src/bin/dhcp6/tests/iface_mgr_unittest.c)
+Pkt6 *capture1() {
+    Pkt6* pkt;
+    pkt = new Pkt6(98);
+    pkt->remote_port_ = 546;
+    pkt->remote_addr_ = IOAddress("fe80::21e:8cff:fe9b:7349");
+    pkt->local_port_ = 0;
+    pkt->local_addr_ = IOAddress("ff02::1:2");
+    pkt->ifindex_ = 2;
+    pkt->iface_ = "eth0";
+    pkt->data_[0]=1;
+    pkt->data_[1]=01;     pkt->data_[2]=02;     pkt->data_[3]=03;     pkt->data_[4]=0;
+    pkt->data_[5]=1;     pkt->data_[6]=0;     pkt->data_[7]=14;     pkt->data_[8]=0;
+    pkt->data_[9]=1;     pkt->data_[10]=0;     pkt->data_[11]=1;     pkt->data_[12]=21;
+    pkt->data_[13]=158;     pkt->data_[14]=60;     pkt->data_[15]=22;     pkt->data_[16]=0;
+    pkt->data_[17]=30;     pkt->data_[18]=140;     pkt->data_[19]=155;     pkt->data_[20]=115;
+    pkt->data_[21]=73;     pkt->data_[22]=0;     pkt->data_[23]=3;     pkt->data_[24]=0;
+    pkt->data_[25]=40;     pkt->data_[26]=0;     pkt->data_[27]=0;     pkt->data_[28]=0;
+    pkt->data_[29]=1;     pkt->data_[30]=255;     pkt->data_[31]=255;     pkt->data_[32]=255;
+    pkt->data_[33]=255;     pkt->data_[34]=255;     pkt->data_[35]=255;     pkt->data_[36]=255;
+    pkt->data_[37]=255;     pkt->data_[38]=0;     pkt->data_[39]=5;     pkt->data_[40]=0;
+    pkt->data_[41]=24;     pkt->data_[42]=32;     pkt->data_[43]=1;     pkt->data_[44]=13;
+    pkt->data_[45]=184;     pkt->data_[46]=0;     pkt->data_[47]=1;     pkt->data_[48]=0;
+    pkt->data_[49]=0;     pkt->data_[50]=0;     pkt->data_[51]=0;     pkt->data_[52]=0;
+    pkt->data_[53]=0;     pkt->data_[54]=0;     pkt->data_[55]=0;     pkt->data_[56]=18;
+    pkt->data_[57]=52;     pkt->data_[58]=255;     pkt->data_[59]=255;     pkt->data_[60]=255;
+    pkt->data_[61]=255;     pkt->data_[62]=255;     pkt->data_[63]=255;     pkt->data_[64]=255;
+    pkt->data_[65]=255;     pkt->data_[66]=0;     pkt->data_[67]=23;     pkt->data_[68]=0;
+    pkt->data_[69]=16;     pkt->data_[70]=32;     pkt->data_[71]=1;     pkt->data_[72]=13;
+    pkt->data_[73]=184;     pkt->data_[74]=0;     pkt->data_[75]=1;     pkt->data_[76]=0;
+    pkt->data_[77]=0;     pkt->data_[78]=0;     pkt->data_[79]=0;     pkt->data_[80]=0;
+    pkt->data_[81]=0;     pkt->data_[82]=0;     pkt->data_[83]=0;     pkt->data_[84]=221;
+    pkt->data_[85]=221;     pkt->data_[86]=0;     pkt->data_[87]=8;     pkt->data_[88]=0;
+    pkt->data_[89]=2;     pkt->data_[90]=0;     pkt->data_[91]=100;     pkt->data_[92]=0;
+    pkt->data_[93]=6;     pkt->data_[94]=0;     pkt->data_[95]=2;     pkt->data_[96]=0;
+    pkt->data_[97]=23;
+    return (pkt);
+}
+
+TEST_F(Pkt6Test, unpack_solicit1) {
+    Pkt6 * sol = capture1();
+
+    ASSERT_EQ(true, sol->unpack());
+
+    // check for length
+    EXPECT_EQ(98, sol->len() );
+
+    // check for type
+    EXPECT_EQ(DHCPV6_SOLICIT, sol->getType() );
+
+    // check that all present options are returned
+    EXPECT_TRUE(sol->getOption(D6O_CLIENTID)); // client-id is present
+    EXPECT_TRUE(sol->getOption(D6O_IA_NA));    // IA_NA is present
+    EXPECT_TRUE(sol->getOption(D6O_ELAPSED_TIME));  // elapsed is present
+    EXPECT_TRUE(sol->getOption(D6O_NAME_SERVERS));
+    EXPECT_TRUE(sol->getOption(D6O_ORO));
+
+    // let's check that non-present options are not returned
+    EXPECT_FALSE(sol->getOption(D6O_SERVERID)); // server-id is missing
+    EXPECT_FALSE(sol->getOption(D6O_IA_TA));
+    EXPECT_FALSE(sol->getOption(D6O_IAADDR));
+
+    // let's limit verbosity of this test
+    // std::cout << sol->toText();
+
+    delete sol;
+}
+
+TEST_F(Pkt6Test, packUnpack) {
+
+    Pkt6 * parent = new Pkt6(100);
+
+    parent->setType(DHCPV6_SOLICIT);
+
+    boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 100));
+    // let's not use zero-length option type 3 as it is IA_NA
+
+    parent->addOption(opt1);
+    parent->addOption(opt2);
+    parent->addOption(opt3);
+
+    EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
+    int transid = parent->getTransid();
+    // transaction-id was randomized, let's remember it
+
+    // calculated length should be 16
+    EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN, 
+               parent->len() );
+
+    EXPECT_TRUE( parent->pack() );
+
+    //
+    EXPECT_EQ( Pkt6::DHCPV6_PKT_HDR_LEN + 3*Option::OPTION6_HDR_LEN, 
+               parent->len() );
+
+    // let's delete options from options_ collection
+    // they still be defined in packed 
+    parent->options_.clear();
+
+    // check that removed options are indeed gone
+    EXPECT_EQ( 4, parent->len() );
+
+    // now recreate options list
+    EXPECT_TRUE( parent->unpack() );
+
+    // transid, message-type should be the same as before
+    EXPECT_EQ(transid, parent->getTransid());
+    EXPECT_EQ(DHCPV6_SOLICIT, parent->getType());
+    
+    EXPECT_TRUE( parent->getOption(1));
+    EXPECT_TRUE( parent->getOption(2));
+    EXPECT_TRUE( parent->getOption(100));
+    EXPECT_FALSE( parent->getOption(4));
+    
+    delete parent;
+}
+
+TEST_F(Pkt6Test, addGetDelOptions) {
+    Pkt6 * parent = new Pkt6(100);
+
+    boost::shared_ptr<Option> opt1(new Option(Option::V6, 1));
+    boost::shared_ptr<Option> opt2(new Option(Option::V6, 2));
+    boost::shared_ptr<Option> opt3(new Option(Option::V6, 2));
+
+    parent->addOption(opt1);
+    parent->addOption(opt2);
+
+    // getOption() test
+    EXPECT_EQ(opt1, parent->getOption(1));
+    EXPECT_EQ(opt2, parent->getOption(2));
+
+    // expect NULL
+    EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(4));
+
+    // now there are 2 options of type 2
+    parent->addOption(opt3);
+
+    // let's delete one of them
+    EXPECT_EQ(true, parent->delOption(2));
+
+    // there still should be the other option 2
+    EXPECT_NE(boost::shared_ptr<Option>(), parent->getOption(2));
+
+    // let's delete the other option 2
+    EXPECT_EQ(true, parent->delOption(2));
+
+    // no more options with type=2
+    EXPECT_EQ(boost::shared_ptr<Option>(), parent->getOption(2));
+
+    // let's try to delete - should fail
+    EXPECT_TRUE(false ==  parent->delOption(2));
+
+    delete parent;
+}
+
+
+}
diff --git a/src/lib/dhcp/tests/run_unittests.cc b/src/lib/dhcp/tests/run_unittests.cc
new file mode 100644
index 0000000..db27f76
--- /dev/null
+++ b/src/lib/dhcp/tests/run_unittests.cc
@@ -0,0 +1,27 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <log/logger_support.h>
+
+int
+main(int argc, char* argv[]) {
+    ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
+
+    int result = RUN_ALL_TESTS();
+
+    return (result);
+}
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 887ac09..5b93f75 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -23,14 +23,22 @@ EXTRA_DIST += rdata/generic/cname_5.cc
 EXTRA_DIST += rdata/generic/cname_5.h
 EXTRA_DIST += rdata/generic/detail/nsec_bitmap.cc
 EXTRA_DIST += rdata/generic/detail/nsec_bitmap.h
+EXTRA_DIST += rdata/generic/detail/txt_like.h
+EXTRA_DIST += rdata/generic/detail/ds_like.h
+EXTRA_DIST += rdata/generic/dlv_32769.cc
+EXTRA_DIST += rdata/generic/dlv_32769.h
 EXTRA_DIST += rdata/generic/dname_39.cc
 EXTRA_DIST += rdata/generic/dname_39.h
 EXTRA_DIST += rdata/generic/dnskey_48.cc
 EXTRA_DIST += rdata/generic/dnskey_48.h
 EXTRA_DIST += rdata/generic/ds_43.cc
 EXTRA_DIST += rdata/generic/ds_43.h
+EXTRA_DIST += rdata/generic/hinfo_13.cc
+EXTRA_DIST += rdata/generic/hinfo_13.h
 EXTRA_DIST += rdata/generic/mx_15.cc
 EXTRA_DIST += rdata/generic/mx_15.h
+EXTRA_DIST += rdata/generic/naptr_35.cc
+EXTRA_DIST += rdata/generic/naptr_35.h
 EXTRA_DIST += rdata/generic/ns_2.cc
 EXTRA_DIST += rdata/generic/ns_2.h
 EXTRA_DIST += rdata/generic/nsec3_50.cc
@@ -49,14 +57,24 @@ EXTRA_DIST += rdata/generic/rrsig_46.cc
 EXTRA_DIST += rdata/generic/rrsig_46.h
 EXTRA_DIST += rdata/generic/soa_6.cc
 EXTRA_DIST += rdata/generic/soa_6.h
+EXTRA_DIST += rdata/generic/spf_99.cc
+EXTRA_DIST += rdata/generic/spf_99.h
 EXTRA_DIST += rdata/generic/txt_16.cc
 EXTRA_DIST += rdata/generic/txt_16.h
+EXTRA_DIST += rdata/generic/minfo_14.cc
+EXTRA_DIST += rdata/generic/minfo_14.h
+EXTRA_DIST += rdata/generic/afsdb_18.cc
+EXTRA_DIST += rdata/generic/afsdb_18.h
 EXTRA_DIST += rdata/hs_4/a_1.cc
 EXTRA_DIST += rdata/hs_4/a_1.h
 EXTRA_DIST += rdata/in_1/a_1.cc
 EXTRA_DIST += rdata/in_1/a_1.h
 EXTRA_DIST += rdata/in_1/aaaa_28.cc
 EXTRA_DIST += rdata/in_1/aaaa_28.h
+EXTRA_DIST += rdata/in_1/dhcid_49.cc
+EXTRA_DIST += rdata/in_1/dhcid_49.h
+EXTRA_DIST += rdata/in_1/srv_33.cc
+EXTRA_DIST += rdata/in_1/srv_33.h
 #EXTRA_DIST += rdata/template.cc
 #EXTRA_DIST += rdata/template.h
 
@@ -66,6 +84,8 @@ BUILT_SOURCES += rdataclass.h rdataclass.cc
 
 lib_LTLIBRARIES = libdns++.la
 
+libdns___la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libdns___la_SOURCES =
 libdns___la_SOURCES += edns.h edns.cc
 libdns___la_SOURCES += exceptions.h exceptions.cc
@@ -84,12 +104,16 @@ libdns___la_SOURCES += rrsetlist.h rrsetlist.cc
 libdns___la_SOURCES += rrttl.h rrttl.cc
 libdns___la_SOURCES += rrtype.cc
 libdns___la_SOURCES += question.h question.cc
+libdns___la_SOURCES += serial.h serial.cc
 libdns___la_SOURCES += tsig.h tsig.cc
 libdns___la_SOURCES += tsigerror.h tsigerror.cc
 libdns___la_SOURCES += tsigkey.h tsigkey.cc
 libdns___la_SOURCES += tsigrecord.h tsigrecord.cc
+libdns___la_SOURCES += character_string.h character_string.cc
 libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.h
 libdns___la_SOURCES += rdata/generic/detail/nsec_bitmap.cc
+libdns___la_SOURCES += rdata/generic/detail/txt_like.h
+libdns___la_SOURCES += rdata/generic/detail/ds_like.h
 
 libdns___la_CPPFLAGS = $(AM_CPPFLAGS)
 # Most applications of libdns++ will only implicitly rely on libcryptolink,
diff --git a/src/lib/dns/benchmarks/Makefile.am b/src/lib/dns/benchmarks/Makefile.am
index 8645385..0d7856f 100644
--- a/src/lib/dns/benchmarks/Makefile.am
+++ b/src/lib/dns/benchmarks/Makefile.am
@@ -13,5 +13,6 @@ noinst_PROGRAMS = rdatarender_bench
 rdatarender_bench_SOURCES = rdatarender_bench.cc
 
 rdatarender_bench_LDADD = $(top_builddir)/src/lib/dns/libdns++.la
+rdatarender_bench_LDADD += $(top_builddir)/src/lib/util/libutil.la
 rdatarender_bench_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 rdatarender_bench_LDADD += $(SQLITE_LIBS)
diff --git a/src/lib/dns/character_string.cc b/src/lib/dns/character_string.cc
new file mode 100644
index 0000000..3a289ac
--- /dev/null
+++ b/src/lib/dns/character_string.cc
@@ -0,0 +1,140 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include "character_string.h"
+#include "rdata.h"
+
+using namespace std;
+using namespace isc::dns::rdata;
+
+namespace isc {
+namespace dns {
+
+namespace {
+bool isDigit(char c) {
+    return (('0' <= c) && (c <= '9'));
+}
+}
+
+std::string
+characterstr::getNextCharacterString(const std::string& input_str,
+                              std::string::const_iterator& input_iterator)
+{
+    string result;
+
+    // If the input string only contains white-spaces, it is an invalid
+    // <character-string>
+    if (input_iterator >= input_str.end()) {
+        isc_throw(InvalidRdataText, "Invalid text format, \
+                  <character-string> field is missing.");
+    }
+
+    // Whether the <character-string> is separated with double quotes (")
+    bool quotes_separated = (*input_iterator == '"');
+    // Whether the quotes are paired if the string is quotes separated
+    bool quotes_paired = false;
+
+    if (quotes_separated) {
+        ++input_iterator;
+    }
+
+    while(input_iterator < input_str.end()){
+        // Escaped characters processing
+        if (*input_iterator == '\\') {
+            if (input_iterator + 1 == input_str.end()) {
+                isc_throw(InvalidRdataText, "<character-string> ended \
+                          prematurely.");
+            } else {
+                if (isDigit(*(input_iterator + 1))) {
+                    // \DDD where each D is a digit. It is the octet
+                    // corresponding to the decimal number described by DDD
+                    if (input_iterator + 3 >= input_str.end()) {
+                        isc_throw(InvalidRdataText, "<character-string> ended \
+                                  prematurely.");
+                    } else {
+                        int n = 0;
+                        ++input_iterator;
+                        for (int i = 0; i < 3; ++i) {
+                            if (isDigit(*input_iterator)) {
+                                n = n*10 + (*input_iterator - '0');
+                                ++input_iterator;
+                            } else {
+                                isc_throw(InvalidRdataText, "Illegal decimal \
+                                          escaping series");
+                            }
+                        }
+                        if (n > 255) {
+                            isc_throw(InvalidRdataText, "Illegal octet \
+                                      number");
+                        }
+                        result.push_back(n);
+                        continue;
+                    }
+                } else {
+                    ++input_iterator;
+                    result.push_back(*input_iterator);
+                    ++input_iterator;
+                    continue;
+                }
+            }
+        }
+
+        if (quotes_separated) {
+            // If the <character-string> is separated with quotes symbol and
+            // another quotes symbol is encountered, it is the end of the
+            // <character-string>
+            if (*input_iterator == '"') {
+                quotes_paired = true;
+                ++input_iterator;
+                // Reach the end of character string
+                break;
+            }
+        } else if (*input_iterator == ' ') {
+            // If the <character-string> is not separated with quotes symbol,
+            // it is separated with <space> char
+            break;
+        }
+
+        result.push_back(*input_iterator);
+
+        ++input_iterator;
+    }
+
+    if (result.size() > MAX_CHARSTRING_LEN) {
+        isc_throw(CharStringTooLong, "<character-string> is too long");
+    }
+
+    if (quotes_separated && !quotes_paired) {
+        isc_throw(InvalidRdataText, "The quotes are not paired");
+    }
+
+    return (result);
+}
+
+std::string
+characterstr::getNextCharacterString(util::InputBuffer& buffer, size_t len) {
+    uint8_t str_len = buffer.readUint8();
+
+    size_t pos = buffer.getPosition();
+    if (len - pos < str_len) {
+        isc_throw(InvalidRdataLength, "Invalid string length");
+    }
+
+    uint8_t buf[MAX_CHARSTRING_LEN];
+    buffer.readData(buf, str_len);
+    return (string(buf, buf + str_len));
+}
+
+} // end of namespace dns
+} // end of namespace isc
diff --git a/src/lib/dns/character_string.h b/src/lib/dns/character_string.h
new file mode 100644
index 0000000..7961274
--- /dev/null
+++ b/src/lib/dns/character_string.h
@@ -0,0 +1,57 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __CHARACTER_STRING_H
+#define __CHARACTER_STRING_H
+
+#include <string>
+#include <exceptions/exceptions.h>
+#include <util/buffer.h>
+
+namespace isc {
+namespace dns {
+
+// \brief Some utility functions to extract <character-string> from string
+// or InputBuffer
+//
+// <character-string> is expressed in one or two ways: as a contiguous set
+// of characters without interior spaces, or as a string beginning with a "
+// and ending with a ".  Inside a " delimited string any character can
+// occur, except for a " itself, which must be quoted using \ (back slash).
+// Ref. RFC1035
+
+
+namespace characterstr {
+    /// Get a <character-string> from a string
+    ///
+    /// \param input_str The input string
+    /// \param input_iterator The iterator from which to start extracting,
+    ///        the iterator will be updated to new position after the function
+    ///        is returned
+    /// \return A std::string that contains the extracted <character-string>
+    std::string getNextCharacterString(const std::string& input_str,
+                                       std::string::const_iterator& input_iterator);
+
+    /// Get a <character-string> from a input buffer
+    ///
+    /// \param buffer The input buffer
+    /// \param len The input buffer total length
+    /// \return A std::string that contains the extracted <character-string>
+    std::string getNextCharacterString(util::InputBuffer& buffer, size_t len);
+
+} // namespace characterstr
+} // namespace dns
+} // namespace isc
+
+#endif // __CHARACTER_STRING_H
diff --git a/src/lib/dns/gen-rdatacode.py.in b/src/lib/dns/gen-rdatacode.py.in
index b3c8da2..f3cd5df 100755
--- a/src/lib/dns/gen-rdatacode.py.in
+++ b/src/lib/dns/gen-rdatacode.py.in
@@ -133,7 +133,15 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
     if classdir_mtime < getmtime('@srcdir@/rdata'):
         classdir_mtime = getmtime('@srcdir@/rdata')
 
-    for dir in list(os.listdir('@srcdir@/rdata')):
+    # Sort directories before iterating through them so that the directory
+    # list is processed in the same order on all systems.  The resulting
+    # files should compile regardless of the order in which the components
+    # are included but...  Having a fixed order for the directories should
+    # eliminate system-dependent problems.  (Note that the directory names
+    # in BIND 10 are ASCII, so the order should be locale-independent.)
+    dirlist = os.listdir('@srcdir@/rdata')
+    dirlist.sort()
+    for dir in dirlist:
         classdir = '@srcdir@/rdata' + os.sep + dir
         m = re_typecode.match(dir)
         if os.path.isdir(classdir) and (m != None or dir == 'generic'):
@@ -145,7 +153,12 @@ def import_definitions(classcode2txt, typecode2txt, typeandclass):
                 class_code = m.group(2)
                 if not class_code in classcode2txt:
                     classcode2txt[class_code] = class_txt
-            for file in list(os.listdir(classdir)):
+
+            # Same considerations as directories regarding sorted order
+            # also apply to files.
+            filelist = os.listdir(classdir)
+            filelist.sort()
+            for file in filelist:
                 file = classdir + os.sep + file
                 m = re_typecode.match(os.path.split(file)[1])
                 if m != None:
diff --git a/src/lib/dns/message.cc b/src/lib/dns/message.cc
index c5ba4e1..b3e9229 100644
--- a/src/lib/dns/message.cc
+++ b/src/lib/dns/message.cc
@@ -124,10 +124,12 @@ public:
     void setOpcode(const Opcode& opcode);
     void setRcode(const Rcode& rcode);
     int parseQuestion(InputBuffer& buffer);
-    int parseSection(const Message::Section section, InputBuffer& buffer);
+    int parseSection(const Message::Section section, InputBuffer& buffer,
+                     Message::ParseOptions options);
     void addRR(Message::Section section, const Name& name,
                const RRClass& rrclass, const RRType& rrtype,
-               const RRTTL& ttl, ConstRdataPtr rdata);
+               const RRTTL& ttl, ConstRdataPtr rdata,
+               Message::ParseOptions options);
     void addEDNS(Message::Section section, const Name& name,
                  const RRClass& rrclass, const RRType& rrtype,
                  const RRTTL& ttl, const Rdata& rdata);
@@ -614,7 +616,7 @@ Message::parseHeader(InputBuffer& buffer) {
 }
 
 void
-Message::fromWire(InputBuffer& buffer) {
+Message::fromWire(InputBuffer& buffer, ParseOptions options) {
     if (impl_->mode_ != Message::PARSE) {
         isc_throw(InvalidMessageOperation,
                   "Message parse attempted in non parse mode");
@@ -626,11 +628,11 @@ Message::fromWire(InputBuffer& buffer) {
 
     impl_->counts_[SECTION_QUESTION] = impl_->parseQuestion(buffer);
     impl_->counts_[SECTION_ANSWER] =
-        impl_->parseSection(SECTION_ANSWER, buffer);
+        impl_->parseSection(SECTION_ANSWER, buffer, options);
     impl_->counts_[SECTION_AUTHORITY] =
-        impl_->parseSection(SECTION_AUTHORITY, buffer);
+        impl_->parseSection(SECTION_AUTHORITY, buffer, options);
     impl_->counts_[SECTION_ADDITIONAL] =
-        impl_->parseSection(SECTION_ADDITIONAL, buffer);
+        impl_->parseSection(SECTION_ADDITIONAL, buffer, options);
 }
 
 int
@@ -706,7 +708,7 @@ struct MatchRR : public unary_function<RRsetPtr, bool> {
 // is hardcoded here.
 int
 MessageImpl::parseSection(const Message::Section section,
-                          InputBuffer& buffer)
+                          InputBuffer& buffer, Message::ParseOptions options)
 {
     assert(section < MessageImpl::NUM_SECTIONS);
 
@@ -738,7 +740,7 @@ MessageImpl::parseSection(const Message::Section section,
             addTSIG(section, count, buffer, start_position, name, rrclass, ttl,
                     *rdata);
         } else {
-            addRR(section, name, rrclass, rrtype, ttl, rdata);
+            addRR(section, name, rrclass, rrtype, ttl, rdata, options);
             ++added;
         }
     }
@@ -749,19 +751,22 @@ MessageImpl::parseSection(const Message::Section section,
 void
 MessageImpl::addRR(Message::Section section, const Name& name,
                    const RRClass& rrclass, const RRType& rrtype,
-                   const RRTTL& ttl, ConstRdataPtr rdata)
+                   const RRTTL& ttl, ConstRdataPtr rdata,
+                   Message::ParseOptions options)
 {
-    vector<RRsetPtr>::iterator it =
-        find_if(rrsets_[section].begin(), rrsets_[section].end(),
-                MatchRR(name, rrtype, rrclass));
-    if (it != rrsets_[section].end()) {
-        (*it)->setTTL(min((*it)->getTTL(), ttl));
-        (*it)->addRdata(rdata);
-    } else {
-        RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
-        rrset->addRdata(rdata);
-        rrsets_[section].push_back(rrset);
+    if ((options & Message::PRESERVE_ORDER) == 0) {
+        vector<RRsetPtr>::iterator it =
+            find_if(rrsets_[section].begin(), rrsets_[section].end(),
+                    MatchRR(name, rrtype, rrclass));
+        if (it != rrsets_[section].end()) {
+            (*it)->setTTL(min((*it)->getTTL(), ttl));
+            (*it)->addRdata(rdata);
+            return;
+        }
     }
+    RRsetPtr rrset(new RRset(name, rrclass, rrtype, ttl));
+    rrset->addRdata(rdata);
+    rrsets_[section].push_back(rrset);
 }
 
 void
diff --git a/src/lib/dns/message.h b/src/lib/dns/message.h
index 6a8bf9f..47632cb 100644
--- a/src/lib/dns/message.h
+++ b/src/lib/dns/message.h
@@ -526,7 +526,7 @@ public:
     /// source message to the same section of this message
     ///
     /// \param section the section to append
-    /// \param target The source Message
+    /// \param source The source Message
     void appendSection(const Section section, const Message& source);
 
     /// \brief Prepare for making a response from a request.
@@ -581,11 +581,58 @@ public:
     /// message
     void toWire(AbstractMessageRenderer& renderer, TSIGContext& tsig_ctx);
 
+    /// Parse options.
+    ///
+    /// describe PRESERVE_ORDER: note doesn't affect EDNS or TSIG.
+    ///
+    /// The option values are used as a parameter for \c fromWire().
+    /// These are values of a bitmask type.  Bitwise operations can be
+    /// performed on these values to express compound options.
+    enum ParseOptions {
+        PARSE_DEFAULT = 0,       ///< The default options
+        PRESERVE_ORDER = 1       ///< Preserve RR order and don't combine them
+    };
+
     /// \brief Parse the header section of the \c Message.
     void parseHeader(isc::util::InputBuffer& buffer);
 
-    /// \brief Parse the \c Message.
-    void fromWire(isc::util::InputBuffer& buffer);
+    /// \brief (Re)build a \c Message object from wire-format data.
+    ///
+    /// This method parses the given wire format data to build a
+    /// complete Message object.  On success, the values of the header section
+    /// fields can be accessible via corresponding get methods, and the
+    /// question and following sections can be accessible via the
+    /// corresponding iterators.  If the message contains an EDNS or TSIG,
+    /// they can be accessible via \c getEDNS() and \c getTSIGRecord(),
+    /// respectively.
+    ///
+    /// This \c Message must be in the \c PARSE mode.
+    ///
+    /// This method performs strict validation on the given message based
+    /// on the DNS protocol specifications.  If the given message data is
+    /// invalid, this method throws an exception (see the exception list).
+    ///
+    /// By default, this method combines RRs of the same name, RR type and
+    /// RR class in a section into a single RRset, even if they are interleaved
+    /// with a different type of RR (though it would be a rare case in
+    /// practice).  If the \c PRESERVE_ORDER option is specified, it handles
+    /// each RR separately, in the appearing order, and converts it to a
+    /// separate RRset (so this RRset should contain exactly one Rdata).
+    /// This mode will be necessary when the higher level protocol is
+    /// ordering conscious.  For example, in AXFR and IXFR, the position of
+    /// the SOA RRs are crucial.
+    ///
+    /// \exception InvalidMessageOperation \c Message is in the RENDER mode
+    /// \exception DNSMessageFORMERR The given message data is syntactically
+    /// \exception MessageTooShort The given data is shorter than a valid
+    /// header section
+    /// \exception std::bad_alloc Memory allocation failure
+    /// \exception Others \c Name, \c Rdata, and \c EDNS classes can also throw
+    ///
+    /// \param buffer A input buffer object that stores the wire data
+    /// \param options Parse options
+    void fromWire(isc::util::InputBuffer& buffer, ParseOptions options
+        = PARSE_DEFAULT);
 
     ///
     /// \name Protocol constants
@@ -621,7 +668,7 @@ typedef boost::shared_ptr<const Message> ConstMessagePtr;
 ///
 /// \param os A \c std::ostream object on which the insertion operation is
 /// performed.
-/// \param record A \c Message object output by the operation.
+/// \param message A \c Message object output by the operation.
 /// \return A reference to the same \c std::ostream object referenced by
 /// parameter \c os after the insertion operation.
 std::ostream& operator<<(std::ostream& os, const Message& message);
@@ -629,6 +676,6 @@ std::ostream& operator<<(std::ostream& os, const Message& message);
 }
 #endif  // __MESSAGE_H
 
-// Local Variables: 
+// Local Variables:
 // mode: c++
-// End: 
+// End:
diff --git a/src/lib/dns/messagerenderer.cc b/src/lib/dns/messagerenderer.cc
index 767aca9..02f5519 100644
--- a/src/lib/dns/messagerenderer.cc
+++ b/src/lib/dns/messagerenderer.cc
@@ -150,8 +150,6 @@ private:
 struct MessageRenderer::MessageRendererImpl {
     /// \brief Constructor from an output buffer.
     ///
-    /// \param buffer An \c OutputBuffer object to which wire format data is
-    /// written.
     MessageRendererImpl() :
         nbuffer_(Name::MAX_WIRE), msglength_limit_(512),
         truncated_(false), compress_mode_(MessageRenderer::CASE_INSENSITIVE)
diff --git a/src/lib/dns/name.cc b/src/lib/dns/name.cc
index 4cd0b2b..772417f 100644
--- a/src/lib/dns/name.cc
+++ b/src/lib/dns/name.cc
@@ -700,7 +700,7 @@ Name::split(const unsigned int first, const unsigned int n) const {
 }
 
 Name
-Name::split(const unsigned level) const {
+Name::split(const unsigned int level) const {
     if (level >= getLabelCount()) {
         isc_throw(OutOfRange, "invalid level for name split (" << level
                   << ") for name " << *this);
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 6c4ef54..dd14991 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -4,40 +4,48 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-pyexec_LTLIBRARIES = pydnspp.la
-pydnspp_la_SOURCES = pydnspp.cc pydnspp_common.cc pydnspp_towire.h
-pydnspp_la_SOURCES += name_python.cc name_python.h
-pydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
-pydnspp_la_SOURCES += rcode_python.cc rcode_python.h
-pydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
-pydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
-pydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
-pydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
-pydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+lib_LTLIBRARIES = libpydnspp.la
+libpydnspp_la_SOURCES = pydnspp_common.cc pydnspp_common.h pydnspp_towire.h
+libpydnspp_la_SOURCES += name_python.cc name_python.h
+libpydnspp_la_SOURCES += rrset_python.cc rrset_python.h
+libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
+libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
+libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
+libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += serial_python.cc serial_python.h
+libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
+libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
+libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
+libpydnspp_la_SOURCES += question_python.cc question_python.h
+libpydnspp_la_SOURCES += tsigkey_python.cc tsigkey_python.h
+libpydnspp_la_SOURCES += tsigerror_python.cc tsigerror_python.h
+libpydnspp_la_SOURCES += tsig_rdata_python.cc tsig_rdata_python.h
+libpydnspp_la_SOURCES += tsigrecord_python.cc tsigrecord_python.h
+libpydnspp_la_SOURCES += tsig_python.cc tsig_python.h
+libpydnspp_la_SOURCES += edns_python.cc edns_python.h
+libpydnspp_la_SOURCES += message_python.cc message_python.h
+
+libpydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+libpydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+libpydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
+
 
+
+pyexec_LTLIBRARIES = pydnspp.la
+pydnspp_la_SOURCES = pydnspp.cc
 pydnspp_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 # Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
 # placed after -Wextra defined in AM_CXXFLAGS
 pydnspp_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 pydnspp_la_LDFLAGS = $(PYTHON_LDFLAGS)
 
-# directly included from source files, so these don't have their own
-# rules
-EXTRA_DIST = pydnspp_common.h
-EXTRA_DIST += edns_python.cc
-EXTRA_DIST += message_python.cc
-EXTRA_DIST += rrclass_python.cc
-EXTRA_DIST += opcode_python.cc
-EXTRA_DIST += rrset_python.cc
-EXTRA_DIST += question_python.cc
-EXTRA_DIST += rrttl_python.cc
-EXTRA_DIST += rdata_python.cc
-EXTRA_DIST += rrtype_python.cc
-EXTRA_DIST += tsigerror_python_inc.cc
+EXTRA_DIST = tsigerror_python_inc.cc
+EXTRA_DIST += message_python_inc.cc
 
 # Python prefers .so, while some OSes (specifically MacOS) use a different
 # suffix for dynamic objects.  -module is necessary to work this around.
 pydnspp_la_LDFLAGS += -module
 pydnspp_la_LIBADD = $(top_builddir)/src/lib/dns/libdns++.la
 pydnspp_la_LIBADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
+pydnspp_la_LIBADD += libpydnspp.la
 pydnspp_la_LIBADD += $(PYTHON_LIB)
diff --git a/src/lib/dns/python/edns_python.cc b/src/lib/dns/python/edns_python.cc
index 83c3bfa..8f0f1a4 100644
--- a/src/lib/dns/python/edns_python.cc
+++ b/src/lib/dns/python/edns_python.cc
@@ -12,38 +12,38 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <Python.h>
+
 #include <cassert>
 
 #include <dns/edns.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "edns_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
 
 using namespace isc::dns;
-using namespace isc::util;
 using namespace isc::dns::rdata;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
 
 namespace {
-//
-// EDNS
-//
-
-// The s_* Class simply covers one instantiation of the object
 
 class s_EDNS : public PyObject {
 public:
-    EDNS* edns;
+    EDNS* cppobj;
 };
 
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
+typedef CPPPyObjectContainer<s_EDNS, EDNS> EDNSContainer;
 
 // General creation and destruction
 int EDNS_init(s_EDNS* self, PyObject* args);
@@ -103,60 +103,6 @@ PyMethodDef EDNS_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_EDNS
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject edns_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.EDNS",
-    sizeof(s_EDNS),                     // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)EDNS_destroy,           // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    EDNS_str,                           // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The EDNS class encapsulates DNS extensions "
-    "provided by the EDNSx protocol.",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    NULL,                               // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    EDNS_methods,                       // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)EDNS_init,                // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
 EDNS*
 createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
              const RRTTL& rrttl, const Rdata& rdata, uint8_t& extended_rcode)
@@ -179,15 +125,15 @@ createFromRR(const Name& name, const RRClass& rrclass, const RRType& rrtype,
 int
 EDNS_init(s_EDNS* self, PyObject* args) {
     uint8_t version = EDNS::SUPPORTED_VERSION;
-    const s_Name* name;
-    const s_RRClass* rrclass;
-    const s_RRType* rrtype;
-    const s_RRTTL* rrttl;
-    const s_Rdata* rdata;
+    const PyObject* name;
+    const PyObject* rrclass;
+    const PyObject* rrtype;
+    const PyObject* rrttl;
+    const PyObject* rdata;
 
     if (PyArg_ParseTuple(args, "|b", &version)) {
         try {
-            self->edns = new EDNS(version);
+            self->cppobj = new EDNS(version);
         } catch (const isc::InvalidParameter& ex) {
             PyErr_SetString(po_InvalidParameter, ex.what());
             return (-1);
@@ -203,10 +149,12 @@ EDNS_init(s_EDNS* self, PyObject* args) {
         // in this context so that we can share the try-catch logic with
         // EDNS_createFromRR() (see below).
         uint8_t extended_rcode;
-        self->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
-                                  *rrtype->rrtype, *rrttl->rrttl,
-                                  *rdata->rdata, extended_rcode);
-        return (self->edns != NULL ? 0 : -1);
+        self->cppobj = createFromRR(PyName_ToName(name),
+                                    PyRRClass_ToRRClass(rrclass),
+                                    PyRRType_ToRRType(rrtype),
+                                    PyRRTTL_ToRRTTL(rrttl),
+                                    PyRdata_ToRdata(rdata), extended_rcode);
+        return (self->cppobj != NULL ? 0 : -1);
     }
 
     PyErr_Clear();
@@ -217,19 +165,19 @@ EDNS_init(s_EDNS* self, PyObject* args) {
 
 void
 EDNS_destroy(s_EDNS* const self) {
-    delete self->edns;
-    self->edns = NULL;
+    delete self->cppobj;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
 PyObject*
 EDNS_toText(const s_EDNS* const self) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->edns->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
 PyObject*
-EDNS_str(PyObject* const self) {
+EDNS_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
                                 const_cast<char*>("to_text"),
@@ -240,14 +188,14 @@ PyObject*
 EDNS_toWire(const s_EDNS* const self, PyObject* args) {
     PyObject* bytes;
     uint8_t extended_rcode;
-    s_MessageRenderer* renderer;
+    PyObject* renderer;
 
     if (PyArg_ParseTuple(args, "Ob", &bytes, &extended_rcode) &&
         PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
-        
+
         OutputBuffer buffer(0);
-        self->edns->toWire(buffer, extended_rcode);
+        self->cppobj->toWire(buffer, extended_rcode);
         PyObject* rd_bytes = PyBytes_FromStringAndSize(
             static_cast<const char*>(buffer.getData()), buffer.getLength());
         PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
@@ -257,8 +205,8 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
         return (result);
     } else if (PyArg_ParseTuple(args, "O!b", &messagerenderer_type,
                                 &renderer, &extended_rcode)) {
-        const unsigned int n = self->edns->toWire(*renderer->messagerenderer,
-                                                  extended_rcode);
+        const unsigned int n = self->cppobj->toWire(
+            PyMessageRenderer_ToMessageRenderer(renderer), extended_rcode);
 
         return (Py_BuildValue("I", n));
     }
@@ -269,12 +217,12 @@ EDNS_toWire(const s_EDNS* const self, PyObject* args) {
 
 PyObject*
 EDNS_getVersion(const s_EDNS* const self) {
-    return (Py_BuildValue("B", self->edns->getVersion()));
+    return (Py_BuildValue("B", self->cppobj->getVersion()));
 }
 
 PyObject*
 EDNS_getDNSSECAwareness(const s_EDNS* const self) {
-    if (self->edns->getDNSSECAwareness()) {
+    if (self->cppobj->getDNSSECAwareness()) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -287,13 +235,13 @@ EDNS_setDNSSECAwareness(s_EDNS* self, PyObject* args) {
     if (!PyArg_ParseTuple(args, "O!", &PyBool_Type, &b)) {
         return (NULL);
     }
-    self->edns->setDNSSECAwareness(b == Py_True);
+    self->cppobj->setDNSSECAwareness(b == Py_True);
     Py_RETURN_NONE;
 }
 
 PyObject*
 EDNS_getUDPSize(const s_EDNS* const self) {
-    return (Py_BuildValue("I", self->edns->getUDPSize()));
+    return (Py_BuildValue("I", self->cppobj->getUDPSize()));
 }
 
 PyObject*
@@ -310,17 +258,17 @@ EDNS_setUDPSize(s_EDNS* self, PyObject* args) {
                         "UDP size is not an unsigned 16-bit integer");
         return (NULL);
     }
-    self->edns->setUDPSize(size);
+    self->cppobj->setUDPSize(size);
     Py_RETURN_NONE;
 }
 
 PyObject*
 EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
-    const s_Name* name;
-    const s_RRClass* rrclass;
-    const s_RRType* rrtype;
-    const s_RRTTL* rrttl;
-    const s_Rdata* rdata;
+    const PyObject* name;
+    const PyObject* rrclass;
+    const PyObject* rrtype;
+    const PyObject* rrttl;
+    const PyObject* rdata;
     s_EDNS* edns_obj = NULL;
 
     assert(null_self == NULL);
@@ -334,14 +282,17 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
             return (NULL);
         }
 
-        edns_obj->edns = createFromRR(*name->cppobj, *rrclass->rrclass,
-                                      *rrtype->rrtype, *rrttl->rrttl,
-                                      *rdata->rdata, extended_rcode);
-        if (edns_obj->edns != NULL) {
+        edns_obj->cppobj = createFromRR(PyName_ToName(name),
+                                        PyRRClass_ToRRClass(rrclass),
+                                        PyRRType_ToRRType(rrtype),
+                                        PyRRTTL_ToRRTTL(rrttl),
+                                        PyRdata_ToRdata(rdata),
+                                        extended_rcode);
+        if (edns_obj->cppobj != NULL) {
             PyObject* extrcode_obj = Py_BuildValue("B", extended_rcode);
             return (Py_BuildValue("OO", edns_obj, extrcode_obj));
         }
-        
+
         Py_DECREF(edns_obj);
         return (NULL);
     }
@@ -353,23 +304,90 @@ EDNS_createFromRR(const s_EDNS* null_self, PyObject* args) {
 }
 
 } // end of anonymous namespace
-// end of EDNS
 
-// Module Initialization, all statics are initialized here
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_EDNS
+// Most of the function slots are not actually implemented and are set to NULL here.
+PyTypeObject edns_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.EDNS",
+    sizeof(s_EDNS),                     // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)EDNS_destroy,           // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    EDNS_str,                           // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The EDNS class encapsulates DNS extensions "
+    "provided by the EDNSx protocol.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    EDNS_methods,                       // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)EDNS_init,                // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createEDNSObject(const EDNS& source) {
+    EDNSContainer container(PyObject_New(s_EDNS, &edns_type));
+    container.set(new EDNS(source));
+    return (container.release());
+}
+
 bool
-initModulePart_EDNS(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&edns_type) < 0) {
-        return (false);
+PyEDNS_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    Py_INCREF(&edns_type);
-    void* p = &edns_type;
-    PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
-
-    addClassVariable(edns_type, "SUPPORTED_VERSION",
-                     Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
+    return (PyObject_TypeCheck(obj, &edns_type));
+}
 
-    return (true);
+const EDNS&
+PyEDNS_ToEDNS(const PyObject* edns_obj) {
+    if (edns_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in EDNS PyObject conversion");
+    }
+    const s_EDNS* edns = static_cast<const s_EDNS*>(edns_obj);
+    return (*edns->cppobj);
 }
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/edns_python.h b/src/lib/dns/python/edns_python.h
new file mode 100644
index 0000000..30d92ab
--- /dev/null
+++ b/src/lib/dns/python/edns_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_EDNS_H
+#define __PYTHON_EDNS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class EDNS;
+
+namespace python {
+
+extern PyTypeObject edns_type;
+
+/// This is a simple shortcut to create a python EDNS object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createEDNSObject(const EDNS& source);
+
+/// \brief Checks if the given python object is an EDNS object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type EDNS, false otherwise
+bool PyEDNS_Check(PyObject* obj);
+
+/// \brief Returns a reference to the EDNS object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type EDNS; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyEDNS_Check()
+///
+/// \note This is not a copy; if the EDNS is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param edns_obj The edns object to convert
+const EDNS& PyEDNS_ToEDNS(const PyObject* edns_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_EDNS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python.cc b/src/lib/dns/python/message_python.cc
index 00596f8..48fff94 100644
--- a/src/lib/dns/python/message_python.cc
+++ b/src/lib/dns/python/message_python.cc
@@ -12,49 +12,44 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
 #include <exceptions/exceptions.h>
+#include <util/python/pycppwrapper_util.h>
 #include <dns/message.h>
 #include <dns/rcode.h>
 #include <dns/tsig.h>
-
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "question_python.h"
+#include "edns_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "rrset_python.h"
+#include "message_python.h"
+#include "messagerenderer_python.h"
+#include "tsig_python.h"
+#include "tsigrecord_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
 using namespace isc::dns;
+using namespace isc::dns::python;
 using namespace isc::util;
+using namespace isc::util::python;
 
-namespace {
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-PyObject* po_MessageTooShort;
-PyObject* po_InvalidMessageSection;
-PyObject* po_InvalidMessageOperation;
-PyObject* po_InvalidMessageUDPSize;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Message
-//
+// Import pydoc text
+#include "message_python_inc.cc"
 
-// The s_* Class simply coverst one instantiation of the object
+namespace {
 class s_Message : public PyObject {
 public:
-    Message* message;
+    isc::dns::Message* cppobj;
 };
 
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
 int Message_init(s_Message* self, PyObject* args);
 void Message_destroy(s_Message* self);
 
@@ -71,8 +66,8 @@ PyObject* Message_setEDNS(s_Message* self, PyObject* args);
 PyObject* Message_getTSIGRecord(s_Message* self);
 PyObject* Message_getRRCount(s_Message* self, PyObject* args);
 // use direct iterators for these? (or simply lists for now?)
-PyObject* Message_getQuestion(s_Message* self);
-PyObject* Message_getSection(s_Message* self, PyObject* args);
+PyObject* Message_getQuestion(PyObject* self, PyObject*);
+PyObject* Message_getSection(PyObject* self, PyObject* args);
 //static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
 //static PyObject* Message_endQuestion(s_Message* self, PyObject* args);
 //static PyObject* Message_beginSection(s_Message* self, PyObject* args);
@@ -85,7 +80,7 @@ PyObject* Message_makeResponse(s_Message* self);
 PyObject* Message_toText(s_Message* self);
 PyObject* Message_str(PyObject* self);
 PyObject* Message_toWire(s_Message* self, PyObject* args);
-PyObject* Message_fromWire(s_Message* self, PyObject* args);
+PyObject* Message_fromWire(PyObject* pyself, PyObject* args);
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -134,10 +129,10 @@ PyMethodDef Message_methods[] = {
     },
     { "get_rr_count", reinterpret_cast<PyCFunction>(Message_getRRCount), METH_VARARGS,
       "Returns the number of RRs contained in the given section." },
-    { "get_question", reinterpret_cast<PyCFunction>(Message_getQuestion), METH_NOARGS,
+    { "get_question", Message_getQuestion, METH_NOARGS,
       "Returns a list of all Question objects in the message "
       "(should be either 0 or 1)" },
-    { "get_section", reinterpret_cast<PyCFunction>(Message_getSection), METH_VARARGS,
+    { "get_section", Message_getSection, METH_VARARGS,
       "Returns a list of all RRset objects in the given section of the message\n"
       "The argument must be of type Section" },
     { "add_question", reinterpret_cast<PyCFunction>(Message_addQuestion), METH_VARARGS,
@@ -167,70 +162,10 @@ PyMethodDef Message_methods[] = {
       "If the given message is not in RENDER mode, an "
       "InvalidMessageOperation is raised.\n"
        },
-    { "from_wire", reinterpret_cast<PyCFunction>(Message_fromWire), METH_VARARGS,
-      "Parses the given wire format to a Message object.\n"
-      "The first argument is a Message to parse the data into.\n"
-      "The second argument must implement the buffer interface.\n"
-      "If the given message is not in PARSE mode, an "
-      "InvalidMessageOperation is raised.\n"
-      "Raises MessageTooShort, DNSMessageFORMERR or DNSMessageBADVERS "
-      " if there is a problem parsing the message." },
+    { "from_wire", Message_fromWire, METH_VARARGS, Message_fromWire_doc },
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Message
-// Most of the functions are not actually implemented and NULL here.
-PyTypeObject message_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.Message",
-    sizeof(s_Message),                  // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)Message_destroy,        // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    Message_str,                        // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The Message class encapsulates a standard DNS message.",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    NULL,                               // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    Message_methods,                    // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)Message_init,             // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
 int
 Message_init(s_Message* self, PyObject* args) {
     int i;
@@ -238,10 +173,10 @@ Message_init(s_Message* self, PyObject* args) {
     if (PyArg_ParseTuple(args, "i", &i)) {
         PyErr_Clear();
         if (i == Message::PARSE) {
-            self->message = new Message(Message::PARSE);
+            self->cppobj = new Message(Message::PARSE);
             return (0);
         } else if (i == Message::RENDER) {
-            self->message = new Message(Message::RENDER);
+            self->cppobj = new Message(Message::RENDER);
             return (0);
         } else {
             PyErr_SetString(PyExc_TypeError, "Message mode must be Message.PARSE or Message.RENDER");
@@ -256,8 +191,8 @@ Message_init(s_Message* self, PyObject* args) {
 
 void
 Message_destroy(s_Message* self) {
-    delete self->message;
-    self->message = NULL;
+    delete self->cppobj;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
@@ -271,7 +206,7 @@ Message_getHeaderFlag(s_Message* self, PyObject* args) {
         return (NULL);
     }
 
-    if (self->message->getHeaderFlag(
+    if (self->cppobj->getHeaderFlag(
             static_cast<Message::HeaderFlag>(messageflag))) {
         Py_RETURN_TRUE;
     } else {
@@ -296,7 +231,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
     }
 
     try {
-        self->message->setHeaderFlag(
+        self->cppobj->setHeaderFlag(
             static_cast<Message::HeaderFlag>(messageflag), on == Py_True);
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
@@ -312,7 +247,7 @@ Message_setHeaderFlag(s_Message* self, PyObject* args) {
 
 PyObject*
 Message_getQid(s_Message* self) {
-    return (Py_BuildValue("I", self->message->getQid()));
+    return (Py_BuildValue("I", self->cppobj->getQid()));
 }
 
 PyObject*
@@ -331,7 +266,7 @@ Message_setQid(s_Message* self, PyObject* args) {
     }
 
     try {
-        self->message->setQid(id);
+        self->cppobj->setQid(id);
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -341,35 +276,25 @@ Message_setQid(s_Message* self, PyObject* args) {
 
 PyObject*
 Message_getRcode(s_Message* self) {
-    s_Rcode* rcode;
-
-    rcode = static_cast<s_Rcode*>(rcode_type.tp_alloc(&rcode_type, 0));
-    if (rcode != NULL) {
-        rcode->cppobj = NULL;
-        try {
-            rcode->cppobj = new Rcode(self->message->getRcode());
-        } catch (const InvalidMessageOperation& imo) {
-            PyErr_SetString(po_InvalidMessageOperation, imo.what());
-        } catch (...) {
-            PyErr_SetString(po_IscException, "Unexpected exception");
-        }
-        if (rcode->cppobj == NULL) {
-            Py_DECREF(rcode);
-            return (NULL);
-        }
+    try {
+        return (createRcodeObject(self->cppobj->getRcode()));
+    } catch (const InvalidMessageOperation& imo) {
+        PyErr_SetString(po_InvalidMessageOperation, imo.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(po_IscException, "Unexpected exception");
+        return (NULL);
     }
-
-    return (rcode);
 }
 
 PyObject*
 Message_setRcode(s_Message* self, PyObject* args) {
-    s_Rcode* rcode;
+    PyObject* rcode;
     if (!PyArg_ParseTuple(args, "O!", &rcode_type, &rcode)) {
         return (NULL);
     }
     try {
-        self->message->setRcode(*rcode->cppobj);
+        self->cppobj->setRcode(PyRcode_ToRcode(rcode));
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -379,35 +304,31 @@ Message_setRcode(s_Message* self, PyObject* args) {
 
 PyObject*
 Message_getOpcode(s_Message* self) {
-    s_Opcode* opcode;
-
-    opcode = static_cast<s_Opcode*>(opcode_type.tp_alloc(&opcode_type, 0));
-    if (opcode != NULL) {
-        opcode->opcode = NULL;
-        try {
-            opcode->opcode = new Opcode(self->message->getOpcode());
-        } catch (const InvalidMessageOperation& imo) {
-            PyErr_SetString(po_InvalidMessageOperation, imo.what());
-        } catch (...) {
-            PyErr_SetString(po_IscException, "Unexpected exception");
-        }
-        if (opcode->opcode == NULL) {
-            Py_DECREF(opcode);
-            return (NULL);
-        }
+    try {
+        return (createOpcodeObject(self->cppobj->getOpcode()));
+    } catch (const InvalidMessageOperation& imo) {
+        PyErr_SetString(po_InvalidMessageOperation, imo.what());
+        return (NULL);
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Failed to get message opcode: " + string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(po_IscException,
+                        "Unexpected exception getting opcode from message");
+        return (NULL);
     }
-
-    return (opcode);
 }
 
 PyObject*
 Message_setOpcode(s_Message* self, PyObject* args) {
-    s_Opcode* opcode;
+    PyObject* opcode;
     if (!PyArg_ParseTuple(args, "O!", &opcode_type, &opcode)) {
         return (NULL);
     }
     try {
-        self->message->setOpcode(*opcode->opcode);
+        self->cppobj->setOpcode(PyOpcode_ToOpcode(opcode));
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -417,32 +338,31 @@ Message_setOpcode(s_Message* self, PyObject* args) {
 
 PyObject*
 Message_getEDNS(s_Message* self) {
-    s_EDNS* edns;
-    EDNS* edns_body;
-    ConstEDNSPtr src = self->message->getEDNS();
-
+    ConstEDNSPtr src = self->cppobj->getEDNS();
     if (!src) {
         Py_RETURN_NONE;
     }
-    if ((edns_body = new(nothrow) EDNS(*src)) == NULL) {
-        return (PyErr_NoMemory());
-    }
-    edns = static_cast<s_EDNS*>(opcode_type.tp_alloc(&edns_type, 0));
-    if (edns != NULL) {
-        edns->edns = edns_body;
+    try {
+        return (createEDNSObject(*src));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Failed to get EDNS from message: " + string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting EDNS from message");
     }
-
-    return (edns);
+    return (NULL);
 }
 
 PyObject*
 Message_setEDNS(s_Message* self, PyObject* args) {
-    s_EDNS* edns;
+    PyObject* edns;
     if (!PyArg_ParseTuple(args, "O!", &edns_type, &edns)) {
         return (NULL);
     }
     try {
-        self->message->setEDNS(EDNSPtr(new EDNS(*edns->edns)));
+        self->cppobj->setEDNS(EDNSPtr(new EDNS(PyEDNS_ToEDNS(edns))));
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -453,7 +373,7 @@ Message_setEDNS(s_Message* self, PyObject* args) {
 PyObject*
 Message_getTSIGRecord(s_Message* self) {
     try {
-        const TSIGRecord* tsig_record = self->message->getTSIGRecord();
+        const TSIGRecord* tsig_record = self->cppobj->getTSIGRecord();
 
         if (tsig_record == NULL) {
             Py_RETURN_NONE;
@@ -483,7 +403,7 @@ Message_getRRCount(s_Message* self, PyObject* args) {
         return (NULL);
     }
     try {
-        return (Py_BuildValue("I", self->message->getRRCount(
+        return (Py_BuildValue("I", self->cppobj->getRRCount(
                                   static_cast<Message::Section>(section))));
     } catch (const isc::OutOfRange& ex) {
         PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -491,48 +411,59 @@ Message_getRRCount(s_Message* self, PyObject* args) {
     }
 }
 
+// This is a helper templated class commonly used for getQuestion and
+// getSection in order to build a list of Message section items.
+template <typename ItemType, typename CreatorParamType>
+class SectionInserter {
+    typedef PyObject* (*creator_t)(const CreatorParamType&);
+public:
+    SectionInserter(PyObject* pylist, creator_t creator) :
+        pylist_(pylist), creator_(creator)
+    {}
+    void operator()(ItemType item) {
+        if (PyList_Append(pylist_, PyObjectContainer(creator_(*item)).get())
+            == -1) {
+            isc_throw(PyCPPWrapperException, "PyList_Append failed, "
+                      "probably due to short memory");
+        }
+    }
+private:
+    PyObject* pylist_;
+    creator_t creator_;
+};
+
+typedef SectionInserter<ConstQuestionPtr, Question> QuestionInserter;
+typedef SectionInserter<ConstRRsetPtr, RRset> RRsetInserter;
+
 // TODO use direct iterators for these? (or simply lists for now?)
 PyObject*
-Message_getQuestion(s_Message* self) {
-    QuestionIterator qi, qi_end;
+Message_getQuestion(PyObject* po_self, PyObject*) {
+    const s_Message* const self = static_cast<s_Message*>(po_self);
+
     try {
-        qi = self->message->beginQuestion();
-        qi_end = self->message->endQuestion();
+        PyObjectContainer list_container(PyList_New(0));
+        for_each(self->cppobj->beginQuestion(),
+                 self->cppobj->endQuestion(),
+                 QuestionInserter(list_container.get(), createQuestionObject));
+        return (list_container.release());
     } catch (const InvalidMessageSection& ex) {
         PyErr_SetString(po_InvalidMessageSection, ex.what());
-        return (NULL);
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure in Message.get_question: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
     } catch (...) {
-        PyErr_SetString(po_IscException,
-                        "Unexpected exception in getting section iterators");
-        return (NULL);
-    }
-
-    PyObject* list = PyList_New(0);
-    if (list == NULL) {
-        return (NULL);
-    }
-
-    for (; qi != qi_end; ++qi) {
-        s_Question *question = static_cast<s_Question*>(
-            question_type.tp_alloc(&question_type, 0));
-        if (question == NULL) {
-            Py_DECREF(question);
-            Py_DECREF(list);
-            return (NULL);
-        }
-        question->question = *qi;
-        if (PyList_Append(list, question) == -1) {
-            Py_DECREF(question);
-            Py_DECREF(list);
-            return (NULL);
-        }
-        Py_DECREF(question);
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in Message.get_question");
     }
-    return (list);
+    return (NULL);
 }
 
 PyObject*
-Message_getSection(s_Message* self, PyObject* args) {
+Message_getSection(PyObject* po_self, PyObject* args) {
+    const s_Message* const self = static_cast<s_Message*>(po_self);
+
     unsigned int section;
     if (!PyArg_ParseTuple(args, "I", &section)) {
         PyErr_Clear();
@@ -540,47 +471,29 @@ Message_getSection(s_Message* self, PyObject* args) {
                         "no valid type in get_section argument");
         return (NULL);
     }
-    RRsetIterator rrsi, rrsi_end;
+
     try {
-        rrsi = self->message->beginSection(
-            static_cast<Message::Section>(section));
-        rrsi_end = self->message->endSection(
-            static_cast<Message::Section>(section));
+        PyObjectContainer list_container(PyList_New(0));
+        const Message::Section msgsection =
+            static_cast<Message::Section>(section);
+        for_each(self->cppobj->beginSection(msgsection),
+                 self->cppobj->endSection(msgsection),
+                 RRsetInserter(list_container.get(), createRRsetObject));
+        return (list_container.release());
     } catch (const isc::OutOfRange& ex) {
         PyErr_SetString(PyExc_OverflowError, ex.what());
-        return (NULL);
     } catch (const InvalidMessageSection& ex) {
         PyErr_SetString(po_InvalidMessageSection, ex.what());
-        return (NULL);
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure in Message.get_section: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
     } catch (...) {
-        PyErr_SetString(po_IscException,
-                        "Unexpected exception in getting section iterators");
-        return (NULL);
-    }
-
-    PyObject* list = PyList_New(0);
-    if (list == NULL) {
-        return (NULL);
-    }
-    for (; rrsi != rrsi_end; ++rrsi) {
-        s_RRset *rrset = static_cast<s_RRset*>(
-            rrset_type.tp_alloc(&rrset_type, 0));
-        if (rrset == NULL) {
-                Py_DECREF(rrset);
-                Py_DECREF(list);
-                return (NULL);
-        }
-        rrset->rrset = *rrsi;
-        if (PyList_Append(list, rrset) == -1) {
-                Py_DECREF(rrset);
-                Py_DECREF(list);
-                return (NULL);
-        }
-        // PyList_Append increases refcount, so we remove ours since
-        // we don't need it anymore
-        Py_DECREF(rrset);
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in Message.get_section");
     }
-    return (list);
+    return (NULL);
 }
 
 //static PyObject* Message_beginQuestion(s_Message* self, PyObject* args);
@@ -590,14 +503,14 @@ Message_getSection(s_Message* self, PyObject* args) {
 //static PyObject* Message_addQuestion(s_Message* self, PyObject* args);
 PyObject*
 Message_addQuestion(s_Message* self, PyObject* args) {
-    s_Question *question;
+    PyObject* question;
 
     if (!PyArg_ParseTuple(args, "O!", &question_type, &question)) {
         return (NULL);
     }
 
-    self->message->addQuestion(question->question);
-    
+    self->cppobj->addQuestion(PyQuestion_ToQuestion(question));
+
     Py_RETURN_NONE;
 }
 
@@ -605,15 +518,15 @@ PyObject*
 Message_addRRset(s_Message* self, PyObject* args) {
     PyObject *sign = Py_False;
     int section;
-    s_RRset* rrset;
+    PyObject* rrset;
     if (!PyArg_ParseTuple(args, "iO!|O!", &section, &rrset_type, &rrset,
                           &PyBool_Type, &sign)) {
         return (NULL);
     }
 
     try {
-        self->message->addRRset(static_cast<Message::Section>(section),
-                                rrset->rrset, sign == Py_True);
+        self->cppobj->addRRset(static_cast<Message::Section>(section),
+                               PyRRset_ToRRsetPtr(rrset), sign == Py_True);
         Py_RETURN_NONE;
     } catch (const InvalidMessageOperation& imo) {
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -634,10 +547,10 @@ Message_clear(s_Message* self, PyObject* args) {
     if (PyArg_ParseTuple(args, "i", &i)) {
         PyErr_Clear();
         if (i == Message::PARSE) {
-            self->message->clear(Message::PARSE);
+            self->cppobj->clear(Message::PARSE);
             Py_RETURN_NONE;
         } else if (i == Message::RENDER) {
-            self->message->clear(Message::RENDER);
+            self->cppobj->clear(Message::RENDER);
             Py_RETURN_NONE;
         } else {
             PyErr_SetString(PyExc_TypeError,
@@ -651,7 +564,7 @@ Message_clear(s_Message* self, PyObject* args) {
 
 PyObject*
 Message_makeResponse(s_Message* self) {
-    self->message->makeResponse();
+    self->cppobj->makeResponse();
     Py_RETURN_NONE;
 }
 
@@ -659,7 +572,7 @@ PyObject*
 Message_toText(s_Message* self) {
     // Py_BuildValue makes python objects from native data
     try {
-        return (Py_BuildValue("s", self->message->toText().c_str()));
+        return (Py_BuildValue("s", self->cppobj->toText().c_str()));
     } catch (const InvalidMessageOperation& imo) {
         PyErr_Clear();
         PyErr_SetString(po_InvalidMessageOperation, imo.what());
@@ -680,16 +593,17 @@ Message_str(PyObject* self) {
 
 PyObject*
 Message_toWire(s_Message* self, PyObject* args) {
-    s_MessageRenderer* mr;
-    s_TSIGContext* tsig_ctx = NULL;
-    
+    PyObject* mr;
+    PyObject* tsig_ctx = NULL;
+
     if (PyArg_ParseTuple(args, "O!|O!", &messagerenderer_type, &mr,
                          &tsigcontext_type, &tsig_ctx)) {
         try {
             if (tsig_ctx == NULL) {
-                self->message->toWire(*mr->messagerenderer);
+                self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
             } else {
-                self->message->toWire(*mr->messagerenderer, *tsig_ctx->cppobj);
+                self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr),
+                                     PyTSIGContext_ToTSIGContext(tsig_ctx));
             }
             // If we return NULL it is seen as an error, so use this for
             // None returns
@@ -721,97 +635,125 @@ Message_toWire(s_Message* self, PyObject* args) {
 }
 
 PyObject*
-Message_fromWire(s_Message* self, PyObject* args) {
+Message_fromWire(PyObject* pyself, PyObject* args) {
+    s_Message* const self = static_cast<s_Message*>(pyself);
     const char* b;
     Py_ssize_t len;
-    if (!PyArg_ParseTuple(args, "y#", &b, &len)) {
-        return (NULL);
-    }
-    
-    InputBuffer inbuf(b, len);
-    try {
-        self->message->fromWire(inbuf);
-        Py_RETURN_NONE;
-    } catch (const InvalidMessageOperation& imo) {
-        PyErr_SetString(po_InvalidMessageOperation, imo.what());
-        return (NULL);
-    } catch (const DNSMessageFORMERR& dmfe) {
-        PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
-        return (NULL);
-    } catch (const DNSMessageBADVERS& dmfe) {
-        PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
-        return (NULL);
-    } catch (const MessageTooShort& mts) {
-        PyErr_SetString(po_MessageTooShort, mts.what());
-        return (NULL);
+    unsigned int options = Message::PARSE_DEFAULT;
+        
+    if (PyArg_ParseTuple(args, "y#", &b, &len) ||
+        PyArg_ParseTuple(args, "y#I", &b, &len, &options)) {
+        // We need to clear the error in case the first call to ParseTuple
+        // fails.
+        PyErr_Clear();
+
+        InputBuffer inbuf(b, len);
+        try {
+            self->cppobj->fromWire(
+                inbuf, static_cast<Message::ParseOptions>(options));
+            Py_RETURN_NONE;
+        } catch (const InvalidMessageOperation& imo) {
+            PyErr_SetString(po_InvalidMessageOperation, imo.what());
+            return (NULL);
+        } catch (const DNSMessageFORMERR& dmfe) {
+            PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+            return (NULL);
+        } catch (const DNSMessageBADVERS& dmfe) {
+            PyErr_SetString(po_DNSMessageBADVERS, dmfe.what());
+            return (NULL);
+        } catch (const MessageTooShort& mts) {
+            PyErr_SetString(po_MessageTooShort, mts.what());
+            return (NULL);
+        } catch (const InvalidBufferPosition& ex) {
+            PyErr_SetString(po_DNSMessageFORMERR, ex.what());
+            return (NULL);
+        } catch (const exception& ex) {
+            const string ex_what =
+                "Error in Message.from_wire: " + string(ex.what());
+            PyErr_SetString(PyExc_RuntimeError, ex_what.c_str());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(PyExc_RuntimeError,
+                            "Unexpected exception in Message.from_wire");
+            return (NULL);
+        }
     }
-}
 
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Message(PyObject* mod) {
-    if (PyType_Ready(&message_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&message_type);
-    
-    // Class variables
-    // These are added to the tp_dict of the type object
-    //
-    addClassVariable(message_type, "PARSE",
-                     Py_BuildValue("I", Message::PARSE));
-    addClassVariable(message_type, "RENDER",
-                     Py_BuildValue("I", Message::RENDER));
-
-    addClassVariable(message_type, "HEADERFLAG_QR",
-                     Py_BuildValue("I", Message::HEADERFLAG_QR));
-    addClassVariable(message_type, "HEADERFLAG_AA",
-                     Py_BuildValue("I", Message::HEADERFLAG_AA));
-    addClassVariable(message_type, "HEADERFLAG_TC",
-                     Py_BuildValue("I", Message::HEADERFLAG_TC));
-    addClassVariable(message_type, "HEADERFLAG_RD",
-                     Py_BuildValue("I", Message::HEADERFLAG_RD));
-    addClassVariable(message_type, "HEADERFLAG_RA",
-                     Py_BuildValue("I", Message::HEADERFLAG_RA));
-    addClassVariable(message_type, "HEADERFLAG_AD",
-                     Py_BuildValue("I", Message::HEADERFLAG_AD));
-    addClassVariable(message_type, "HEADERFLAG_CD",
-                     Py_BuildValue("I", Message::HEADERFLAG_CD));
-
-    addClassVariable(message_type, "SECTION_QUESTION",
-                     Py_BuildValue("I", Message::SECTION_QUESTION));
-    addClassVariable(message_type, "SECTION_ANSWER",
-                     Py_BuildValue("I", Message::SECTION_ANSWER));
-    addClassVariable(message_type, "SECTION_AUTHORITY",
-                     Py_BuildValue("I", Message::SECTION_AUTHORITY));
-    addClassVariable(message_type, "SECTION_ADDITIONAL",
-                     Py_BuildValue("I", Message::SECTION_ADDITIONAL));
-
-    addClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
-                     Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
-
-    /* Class-specific exceptions */
-    po_MessageTooShort = PyErr_NewException("pydnspp.MessageTooShort", NULL,
-                                            NULL);
-    PyModule_AddObject(mod, "MessageTooShort", po_MessageTooShort);
-    po_InvalidMessageSection =
-        PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidMessageSection", po_InvalidMessageSection);
-    po_InvalidMessageOperation =
-        PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidMessageOperation",
-                       po_InvalidMessageOperation);
-    po_InvalidMessageUDPSize =
-        PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidMessageUDPSize", po_InvalidMessageUDPSize);
-    po_DNSMessageBADVERS = PyErr_NewException("pydnspp.DNSMessageBADVERS",
-                                              NULL, NULL);
-    PyModule_AddObject(mod, "DNSMessageBADVERS", po_DNSMessageBADVERS);
-
-    PyModule_AddObject(mod, "Message",
-                       reinterpret_cast<PyObject*>(&message_type));
-
-
-    return (true);
+    PyErr_SetString(PyExc_TypeError,
+                    "from_wire() arguments must be a byte object and "
+                    "(optional) parse options");
+    return (NULL);
 }
+
 } // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_MessageTooShort;
+PyObject* po_InvalidMessageSection;
+PyObject* po_InvalidMessageOperation;
+PyObject* po_InvalidMessageUDPSize;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Message
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject message_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Message",
+    sizeof(s_Message),                  // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Message_destroy,        // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Message_str,                        // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Message class encapsulates a standard DNS message.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Message_methods,                    // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Message_init,             // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/message_python.h b/src/lib/dns/python/message_python.h
new file mode 100644
index 0000000..be23890
--- /dev/null
+++ b/src/lib/dns/python/message_python.h
@@ -0,0 +1,40 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_MESSAGE_H
+#define __PYTHON_MESSAGE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Message;
+
+namespace python {
+
+extern PyObject* po_MessageTooShort;
+extern PyObject* po_InvalidMessageSection;
+extern PyObject* po_InvalidMessageOperation;
+extern PyObject* po_InvalidMessageUDPSize;
+
+extern PyTypeObject message_type;
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_MESSAGE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/message_python_inc.cc b/src/lib/dns/python/message_python_inc.cc
new file mode 100644
index 0000000..561c494
--- /dev/null
+++ b/src/lib/dns/python/message_python_inc.cc
@@ -0,0 +1,41 @@
+namespace {
+const char* const Message_fromWire_doc = "\
+from_wire(data, options=PARSE_DEFAULT)\n\
+\n\
+(Re)build a Message object from wire-format data.\n\
+\n\
+This method parses the given wire format data to build a complete\n\
+Message object. On success, the values of the header section fields\n\
+can be accessible via corresponding get methods, and the question and\n\
+following sections can be accessible via the corresponding iterators.\n\
+If the message contains an EDNS or TSIG, they can be accessible via\n\
+get_edns() and get_tsig_record(), respectively.\n\
+\n\
+This Message must be in the PARSE mode.\n\
+\n\
+This method performs strict validation on the given message based on\n\
+the DNS protocol specifications. If the given message data is invalid,\n\
+this method throws an exception (see the exception list).\n\
+\n\
+By default, this method combines RRs of the same name, RR type and RR\n\
+class in a section into a single RRset, even if they are interleaved\n\
+with a different type of RR (though it would be a rare case in\n\
+practice). If the PRESERVE_ORDER option is specified, it handles each\n\
+RR separately, in the appearing order, and converts it to a separate\n\
+RRset (so this RRset should contain exactly one Rdata). This mode will\n\
+be necessary when the higher level protocol is ordering conscious. For\n\
+example, in AXFR and IXFR, the position of the SOA RRs are crucial.\n\
+\n\
+Exceptions:\n\
+  InvalidMessageOperation Message is in the RENDER mode\n\
+  DNSMessageFORMERR The given message data is syntactically invalid\n\
+  MessageTooShort The given data is shorter than a valid header\n\
+             section\n\
+  Others     Name, Rdata, and EDNS classes can also throw\n\
+\n\
+Parameters:\n\
+  data       A byte object of the wire data\n\
+  options    Parse options\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/dns/python/messagerenderer_python.cc b/src/lib/dns/python/messagerenderer_python.cc
index e6f5d3e..bb89622 100644
--- a/src/lib/dns/python/messagerenderer_python.cc
+++ b/src/lib/dns/python/messagerenderer_python.cc
@@ -17,6 +17,7 @@
 #include <util/buffer.h>
 
 #include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
 
 #include "pydnspp_common.h"
 #include "messagerenderer_python.h"
@@ -24,15 +25,21 @@
 using namespace isc::dns;
 using namespace isc::dns::python;
 using namespace isc::util;
-
-// MessageRenderer
-
-s_MessageRenderer::s_MessageRenderer() : outputbuffer(NULL),
-                                         messagerenderer(NULL)
-{
-}
+using namespace isc::util::python;
 
 namespace {
+// The s_* Class simply covers one instantiation of the object.
+//
+// since we don't use *Buffer in the python version (but work with
+// the already existing bytearray type where we use these custom buffers
+// in C++), we need to keep track of one here.
+class s_MessageRenderer : public PyObject {
+public:
+    s_MessageRenderer();
+    isc::util::OutputBuffer* outputbuffer;
+    MessageRenderer* cppobj;
+};
+
 int MessageRenderer_init(s_MessageRenderer* self);
 void MessageRenderer_destroy(s_MessageRenderer* self);
 
@@ -72,15 +79,15 @@ PyMethodDef MessageRenderer_methods[] = {
 int
 MessageRenderer_init(s_MessageRenderer* self) {
     self->outputbuffer = new OutputBuffer(4096);
-    self->messagerenderer = new MessageRenderer(*self->outputbuffer);
+    self->cppobj = new MessageRenderer(*self->outputbuffer);
     return (0);
 }
 
 void
 MessageRenderer_destroy(s_MessageRenderer* self) {
-    delete self->messagerenderer;
+    delete self->cppobj;
     delete self->outputbuffer;
-    self->messagerenderer = NULL;
+    self->cppobj = NULL;
     self->outputbuffer = NULL;
     Py_TYPE(self)->tp_free(self);
 }
@@ -88,18 +95,18 @@ MessageRenderer_destroy(s_MessageRenderer* self) {
 PyObject*
 MessageRenderer_getData(s_MessageRenderer* self) {
     return (Py_BuildValue("y#",
-                         self->messagerenderer->getData(),
-                          self->messagerenderer->getLength()));
+                          self->cppobj->getData(),
+                          self->cppobj->getLength()));
 }
 
 PyObject*
 MessageRenderer_getLength(s_MessageRenderer* self) {
-    return (Py_BuildValue("I", self->messagerenderer->getLength()));
+    return (Py_BuildValue("I", self->cppobj->getLength()));
 }
 
 PyObject*
 MessageRenderer_isTruncated(s_MessageRenderer* self) {
-    if (self->messagerenderer->isTruncated()) {
+    if (self->cppobj->isTruncated()) {
         Py_RETURN_TRUE;
     } else {
         Py_RETURN_FALSE;
@@ -108,17 +115,17 @@ MessageRenderer_isTruncated(s_MessageRenderer* self) {
 
 PyObject*
 MessageRenderer_getLengthLimit(s_MessageRenderer* self) {
-    return (Py_BuildValue("I", self->messagerenderer->getLengthLimit()));
+    return (Py_BuildValue("I", self->cppobj->getLengthLimit()));
 }
 
 PyObject*
 MessageRenderer_getCompressMode(s_MessageRenderer* self) {
-    return (Py_BuildValue("I", self->messagerenderer->getCompressMode()));
+    return (Py_BuildValue("I", self->cppobj->getCompressMode()));
 }
 
 PyObject*
 MessageRenderer_setTruncated(s_MessageRenderer* self) {
-    self->messagerenderer->setTruncated();
+    self->cppobj->setTruncated();
     Py_RETURN_NONE;
 }
 
@@ -138,7 +145,7 @@ MessageRenderer_setLengthLimit(s_MessageRenderer* self,
                         "MessageRenderer length limit out of range");
         return (NULL);
     }
-    self->messagerenderer->setLengthLimit(lengthlimit);
+    self->cppobj->setLengthLimit(lengthlimit);
     Py_RETURN_NONE;
 }
 
@@ -152,12 +159,12 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
     }
 
     if (mode == MessageRenderer::CASE_INSENSITIVE) {
-        self->messagerenderer->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
+        self->cppobj->setCompressMode(MessageRenderer::CASE_INSENSITIVE);
         // If we return NULL it is seen as an error, so use this for
         // None returns, it also applies to CASE_SENSITIVE.
         Py_RETURN_NONE;
     } else if (mode == MessageRenderer::CASE_SENSITIVE) {
-        self->messagerenderer->setCompressMode(MessageRenderer::CASE_SENSITIVE);
+        self->cppobj->setCompressMode(MessageRenderer::CASE_SENSITIVE);
         Py_RETURN_NONE;
     } else {
         PyErr_SetString(PyExc_TypeError,
@@ -169,12 +176,11 @@ MessageRenderer_setCompressMode(s_MessageRenderer* self,
 
 PyObject*
 MessageRenderer_clear(s_MessageRenderer* self) {
-    self->messagerenderer->clear();
+    self->cppobj->clear();
     Py_RETURN_NONE;
 }
 } // end of unnamed namespace
 
-// end of MessageRenderer
 namespace isc {
 namespace dns {
 namespace python {
@@ -233,37 +239,29 @@ PyTypeObject messagerenderer_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_MessageRenderer(PyObject* mod) {
-    // Add the exceptions to the module
+// If we need a createMessageRendererObject(), should we copy? can we?
+// copy the existing buffer into a new one, then create a new renderer with
+// that buffer?
 
-    // Add the enums to the module
-
-    // Add the constants to the module
-
-    // Add the classes to the module
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module
+bool
+PyMessageRenderer_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &messagerenderer_type));
+}
 
-    // NameComparisonResult
-    if (PyType_Ready(&messagerenderer_type) < 0) {
-        return (false);
+MessageRenderer&
+PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj) {
+    if (messagerenderer_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in MessageRenderer PyObject conversion");
     }
-    Py_INCREF(&messagerenderer_type);
+    s_MessageRenderer* messagerenderer = static_cast<s_MessageRenderer*>(messagerenderer_obj);
+    return (*messagerenderer->cppobj);
+}
 
-    // Class variables
-    // These are added to the tp_dict of the type object
-    addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
-                     Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
-    addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
-                     Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
 
-    PyModule_AddObject(mod, "MessageRenderer",
-                       reinterpret_cast<PyObject*>(&messagerenderer_type));
-    
-    return (true);
-}
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/messagerenderer_python.h b/src/lib/dns/python/messagerenderer_python.h
index 3bb096e..ea9a940 100644
--- a/src/lib/dns/python/messagerenderer_python.h
+++ b/src/lib/dns/python/messagerenderer_python.h
@@ -17,30 +17,35 @@
 
 #include <Python.h>
 
+#include <util/buffer.h>
+
 namespace isc {
-namespace util {
-class OutputBuffer;
-}
 namespace dns {
 class MessageRenderer;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object.
-//
-// since we don't use *Buffer in the python version (but work with
-// the already existing bytearray type where we use these custom buffers
-// in C++, we need to keep track of one here.
-class s_MessageRenderer : public PyObject {
-public:
-    s_MessageRenderer();
-    isc::util::OutputBuffer* outputbuffer;
-    MessageRenderer* messagerenderer;
-};
-
 extern PyTypeObject messagerenderer_type;
 
-bool initModulePart_MessageRenderer(PyObject* mod);
+/// \brief Checks if the given python object is a MessageRenderer object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type MessageRenderer, false otherwise
+bool PyMessageRenderer_Check(PyObject* obj);
+
+/// \brief Returns a reference to the MessageRenderer object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type MessageRenderer; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyMessageRenderer_Check()
+///
+/// \note This is not a copy; if the MessageRenderer is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param messagerenderer_obj The messagerenderer object to convert
+MessageRenderer& PyMessageRenderer_ToMessageRenderer(PyObject* messagerenderer_obj);
 
 } // namespace python
 } // namespace dns
diff --git a/src/lib/dns/python/name_python.cc b/src/lib/dns/python/name_python.cc
index d00c6f7..ce556df 100644
--- a/src/lib/dns/python/name_python.cc
+++ b/src/lib/dns/python/name_python.cc
@@ -25,20 +25,27 @@
 #include "messagerenderer_python.h"
 #include "name_python.h"
 
-//
-// Definition of the classes
-//
+#include <iostream>
 
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
 using namespace isc::dns;
 using namespace isc::dns::python;
 using namespace isc::util;
 using namespace isc::util::python;
 
 namespace {
-// NameComparisonResult
+// The s_* Class simply covers one instantiation of the object.
+class s_NameComparisonResult : public PyObject {
+public:
+    s_NameComparisonResult() : cppobj(NULL) {}
+    NameComparisonResult* cppobj;
+};
+
+class s_Name : public PyObject {
+public:
+    s_Name() : cppobj(NULL), position(0) {}
+    Name* cppobj;
+    size_t position;
+};
 
 int NameComparisonResult_init(s_NameComparisonResult*, PyObject*);
 void NameComparisonResult_destroy(s_NameComparisonResult* self);
@@ -84,9 +91,7 @@ PyObject*
 NameComparisonResult_getRelation(s_NameComparisonResult* self) {
     return (Py_BuildValue("I", self->cppobj->getRelation()));
 }
-// end of NameComparisonResult
 
-// Name
 // Shortcut type which would be convenient for adding class variables safely.
 typedef CPPPyObjectContainer<s_Name, Name> NameContainer;
 
@@ -94,7 +99,7 @@ int Name_init(s_Name* self, PyObject* args);
 void Name_destroy(s_Name* self);
 
 PyObject* Name_toWire(s_Name* self, PyObject* args);
-PyObject* Name_toText(s_Name* self);
+PyObject* Name_toText(s_Name* self, PyObject* args);
 PyObject* Name_str(PyObject* self);
 PyObject* Name_getLabelCount(s_Name* self);
 PyObject* Name_at(s_Name* self, PyObject* args);
@@ -117,8 +122,9 @@ PyMethodDef Name_methods[] = {
       "Returns the length" },
     { "get_labelcount", reinterpret_cast<PyCFunction>(Name_getLabelCount), METH_NOARGS,
       "Returns the number of labels" },
-    { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_NOARGS,
-      "Returns the string representation" },
+    { "to_text", reinterpret_cast<PyCFunction>(Name_toText), METH_VARARGS,
+      "Returns the string representation. The optional argument must be "
+      "either True or False. If True, the final dot will be omitted." },
     { "to_wire", reinterpret_cast<PyCFunction>(Name_toWire), METH_VARARGS,
       "Converts the Name object to wire format.\n"
       "The argument can be either a MessageRenderer or an object that "
@@ -275,8 +281,24 @@ Name_getLabelCount(s_Name* self) {
 }
 
 PyObject*
-Name_toText(s_Name* self) {
-    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+Name_toText(s_Name* self, PyObject* args) {
+    PyObject* omit_final_dot_obj = NULL;
+    if (PyArg_ParseTuple(args, "|O", &omit_final_dot_obj)) {
+        bool omit_final_dot = false;
+        if (omit_final_dot_obj != NULL) {
+            if (PyBool_Check(omit_final_dot_obj) != 0) {
+                omit_final_dot = (omit_final_dot_obj == Py_True);
+            } else {
+                PyErr_SetString(PyExc_TypeError,
+                    "Optional argument 1 of to_text() should be True or False");
+                return (NULL);
+            }
+        }
+        return (Py_BuildValue("s",
+                              self->cppobj->toText(omit_final_dot).c_str()));
+    } else {
+        return (NULL);
+    }
 }
 
 PyObject*
@@ -292,7 +314,7 @@ Name_str(PyObject* self) {
 PyObject*
 Name_toWire(s_Name* self, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
+    PyObject* mr;
 
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
@@ -306,7 +328,7 @@ Name_toWire(s_Name* self, PyObject* args) {
         Py_DECREF(name_bytes);
         return (result);
     } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->cppobj->toWire(*mr->messagerenderer);
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
         // If we return NULL it is seen as an error, so use this for
         // None returns
         Py_RETURN_NONE;
@@ -495,7 +517,7 @@ Name_isWildCard(s_Name* self) {
         Py_RETURN_FALSE;
     }
 }
-// end of Name
+
 } // end of unnamed namespace
 
 namespace isc {
@@ -634,94 +656,32 @@ PyTypeObject name_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Name(PyObject* mod) {
-    // Add the classes to the module
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module
-
-    //
-    // NameComparisonResult
-    //
-    if (PyType_Ready(&name_comparison_result_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&name_comparison_result_type);
-
-    // Add the enums to the module
-    po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
-                                    NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
-                                    NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
-                                    NameComparisonResult::EQUAL, "EQUAL",
-                                    NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
-    addClassVariable(name_comparison_result_type, "NameRelation", po_NameRelation);
-
-    PyModule_AddObject(mod, "NameComparisonResult",
-                       reinterpret_cast<PyObject*>(&name_comparison_result_type));
-
-    //
-    // Name
-    //
-    
-    if (PyType_Ready(&name_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&name_type);
-
-    // Add the constants to the module
-    addClassVariable(name_type, "MAX_WIRE", Py_BuildValue("I", Name::MAX_WIRE));
-    addClassVariable(name_type, "MAX_LABELS", Py_BuildValue("I", Name::MAX_LABELS));
-    addClassVariable(name_type, "MAX_LABELLEN", Py_BuildValue("I", Name::MAX_LABELLEN));
-    addClassVariable(name_type, "MAX_COMPRESS_POINTER", Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
-    addClassVariable(name_type, "COMPRESS_POINTER_MARK8", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
-    addClassVariable(name_type, "COMPRESS_POINTER_MARK16", Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
-
-    s_Name* root_name = PyObject_New(s_Name, &name_type);
-    root_name->cppobj = new Name(Name::ROOT_NAME());
-    PyObject* po_ROOT_NAME = root_name;
-    addClassVariable(name_type, "ROOT_NAME", po_ROOT_NAME);
-
-    PyModule_AddObject(mod, "Name",
-                       reinterpret_cast<PyObject*>(&name_type));
-    
-
-    // Add the exceptions to the module
-    po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
-    PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
-
-    po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
-    PyModule_AddObject(mod, "TooLongName", po_TooLongName);
-
-    po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
-    PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
-
-    po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
-    PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
-
-    po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
-    PyModule_AddObject(mod, "BadEscape", po_BadEscape);
-
-    po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
-    PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
-
-    po_InvalidBufferPosition = PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
-
-    // This one could have gone into the message_python.cc file, but is
-    // already needed here.
-    po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR", NULL, NULL);
-    PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
-
-    return (true);
-}
-
 PyObject*
 createNameObject(const Name& source) {
-    NameContainer container = PyObject_New(s_Name, &name_type);
+    NameContainer container(PyObject_New(s_Name, &name_type));
     container.set(new Name(source));
     return (container.release());
 }
+
+bool
+PyName_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &name_type));
+}
+
+const Name&
+PyName_ToName(const PyObject* name_obj) {
+    if (name_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Name PyObject conversion");
+    }
+    const s_Name* name = static_cast<const s_Name*>(name_obj);
+    return (*name->cppobj);
+}
+
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/name_python.h b/src/lib/dns/python/name_python.h
index f8e793d..86d7fd0 100644
--- a/src/lib/dns/python/name_python.h
+++ b/src/lib/dns/python/name_python.h
@@ -17,20 +17,12 @@
 
 #include <Python.h>
 
-#include <util/python/pycppwrapper_util.h>
-
 namespace isc {
 namespace dns {
-class NameComparisonResult;
 class Name;
 
 namespace python {
 
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
 extern PyObject* po_EmptyLabel;
 extern PyObject* po_TooLongName;
 extern PyObject* po_TooLongLabel;
@@ -47,25 +39,9 @@ extern PyObject* po_DNSMessageFORMERR;
 //
 extern PyObject* po_NameRelation;
 
-// The s_* Class simply covers one instantiation of the object.
-class s_NameComparisonResult : public PyObject {
-public:
-    s_NameComparisonResult() : cppobj(NULL) {}
-    NameComparisonResult* cppobj;
-};
-
-class s_Name : public PyObject {
-public:
-    s_Name() : cppobj(NULL), position(0) {}
-    Name* cppobj;
-    size_t position;
-};
-
 extern PyTypeObject name_comparison_result_type;
 extern PyTypeObject name_type;
 
-bool initModulePart_Name(PyObject* mod);
-
 /// This is A simple shortcut to create a python Name object (in the
 /// form of a pointer to PyObject) with minimal exception safety.
 /// On success, it returns a valid pointer to PyObject with a reference
@@ -74,6 +50,27 @@ bool initModulePart_Name(PyObject* mod);
 /// This function is expected to be called with in a try block
 /// followed by necessary setup for python exception.
 PyObject* createNameObject(const Name& source);
+
+/// \brief Checks if the given python object is a Name object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Name, false otherwise
+bool PyName_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Name object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Name; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyName_Check()
+///
+/// \note This is not a copy; if the Name is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param name_obj The name object to convert
+const Name& PyName_ToName(const PyObject* name_obj);
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/opcode_python.cc b/src/lib/dns/python/opcode_python.cc
index 0e2a30b..50436a9 100644
--- a/src/lib/dns/python/opcode_python.cc
+++ b/src/lib/dns/python/opcode_python.cc
@@ -12,32 +12,31 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <dns/opcode.h>
-
-using namespace isc::dns;
+#include <Python.h>
 
-//
-// Declaration of the custom exceptions (None for this class)
+#include <dns/opcode.h>
+#include <util/python/pycppwrapper_util.h>
 
-//
-// Definition of the classes
-//
+#include "pydnspp_common.h"
+#include "opcode_python.h"
+#include "edns_python.h"
 
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
 
 namespace {
-//
-// Opcode
-//
+
 class s_Opcode : public PyObject {
 public:
-    s_Opcode() : opcode(NULL), static_code(false) {}
-    const Opcode* opcode;
+    s_Opcode() : cppobj(NULL), static_code(false) {}
+    const isc::dns::Opcode* cppobj;
     bool static_code;
 };
 
+typedef CPPPyObjectContainer<s_Opcode, Opcode> OpcodeContainer;
+
 int Opcode_init(s_Opcode* const self, PyObject* args);
 void Opcode_destroy(s_Opcode* const self);
 
@@ -103,64 +102,13 @@ PyMethodDef Opcode_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-PyTypeObject opcode_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.Opcode",
-    sizeof(s_Opcode),                   // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)Opcode_destroy,         // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    Opcode_str,                         // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The Opcode class objects represent standard OPCODEs "
-    "of the header section of DNS messages.",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    (richcmpfunc)Opcode_richcmp,        // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    Opcode_methods,                     // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)Opcode_init,              // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
 
 int
 Opcode_init(s_Opcode* const self, PyObject* args) {
     uint8_t code = 0;
     if (PyArg_ParseTuple(args, "b", &code)) {
         try {
-            self->opcode = new Opcode(code);
+            self->cppobj = new Opcode(code);
             self->static_code = false;
         } catch (const isc::OutOfRange& ex) {
             PyErr_SetString(PyExc_OverflowError, ex.what());
@@ -181,22 +129,22 @@ Opcode_init(s_Opcode* const self, PyObject* args) {
 void
 Opcode_destroy(s_Opcode* const self) {
     // Depending on whether we created the rcode or are referring
-    // to a global static one, we do or do not delete self->opcode here
+    // to a global static one, we do or do not delete self->cppobj here
     if (!self->static_code) {
-        delete self->opcode;
+        delete self->cppobj;
     }
-    self->opcode = NULL;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
 PyObject*
 Opcode_getCode(const s_Opcode* const self) {
-    return (Py_BuildValue("I", self->opcode->getCode()));
+    return (Py_BuildValue("I", self->cppobj->getCode()));
 }
 
 PyObject*
 Opcode_toText(const s_Opcode* const self) {
-    return (Py_BuildValue("s", self->opcode->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
 PyObject*
@@ -211,7 +159,7 @@ PyObject*
 Opcode_createStatic(const Opcode& opcode) {
     s_Opcode* ret = PyObject_New(s_Opcode, &opcode_type);
     if (ret != NULL) {
-        ret->opcode = &opcode;
+        ret->cppobj = &opcode;
         ret->static_code = true;
     }
     return (ret);
@@ -297,7 +245,7 @@ Opcode_RESERVED15(const s_Opcode*) {
     return (Opcode_createStatic(Opcode::RESERVED15()));
 }
 
-PyObject* 
+PyObject*
 Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
                const int op)
 {
@@ -318,10 +266,10 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
         PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
         return (NULL);
     case Py_EQ:
-        c = (*self->opcode == *other->opcode);
+        c = (*self->cppobj == *other->cppobj);
         break;
     case Py_NE:
-        c = (*self->opcode != *other->opcode);
+        c = (*self->cppobj != *other->cppobj);
         break;
     case Py_GT:
         PyErr_SetString(PyExc_TypeError, "Unorderable type; Opcode");
@@ -336,55 +284,88 @@ Opcode_richcmp(const s_Opcode* const self, const s_Opcode* const other,
         Py_RETURN_FALSE;
 }
 
-// Module Initialization, all statics are initialized here
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+PyTypeObject opcode_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Opcode",
+    sizeof(s_Opcode),                   // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Opcode_destroy,         // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Opcode_str,                         // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Opcode class objects represent standard OPCODEs "
+    "of the header section of DNS messages.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)Opcode_richcmp,        // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Opcode_methods,                     // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Opcode_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createOpcodeObject(const Opcode& source) {
+    OpcodeContainer container(PyObject_New(s_Opcode, &opcode_type));
+    container.set(new Opcode(source));
+    return (container.release());
+}
+
 bool
-initModulePart_Opcode(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&opcode_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&opcode_type);
-    void* p = &opcode_type;
-    if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
-        Py_DECREF(&opcode_type);
-        return (false);
+PyOpcode_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
+    return (PyObject_TypeCheck(obj, &opcode_type));
+}
 
-    addClassVariable(opcode_type, "QUERY_CODE",
-                     Py_BuildValue("h", Opcode::QUERY_CODE));
-    addClassVariable(opcode_type, "IQUERY_CODE",
-                     Py_BuildValue("h", Opcode::IQUERY_CODE));
-    addClassVariable(opcode_type, "STATUS_CODE",
-                     Py_BuildValue("h", Opcode::STATUS_CODE));
-    addClassVariable(opcode_type, "RESERVED3_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED3_CODE));
-    addClassVariable(opcode_type, "NOTIFY_CODE",
-                     Py_BuildValue("h", Opcode::NOTIFY_CODE));
-    addClassVariable(opcode_type, "UPDATE_CODE",
-                     Py_BuildValue("h", Opcode::UPDATE_CODE));
-    addClassVariable(opcode_type, "RESERVED6_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED6_CODE));
-    addClassVariable(opcode_type, "RESERVED7_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED7_CODE));
-    addClassVariable(opcode_type, "RESERVED8_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED8_CODE));
-    addClassVariable(opcode_type, "RESERVED9_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED9_CODE));
-    addClassVariable(opcode_type, "RESERVED10_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED10_CODE));
-    addClassVariable(opcode_type, "RESERVED11_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED11_CODE));
-    addClassVariable(opcode_type, "RESERVED12_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED12_CODE));
-    addClassVariable(opcode_type, "RESERVED13_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED13_CODE));
-    addClassVariable(opcode_type, "RESERVED14_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED14_CODE));
-    addClassVariable(opcode_type, "RESERVED15_CODE",
-                     Py_BuildValue("h", Opcode::RESERVED15_CODE));
-
-    return (true);
+const Opcode&
+PyOpcode_ToOpcode(const PyObject* opcode_obj) {
+    if (opcode_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Opcode PyObject conversion");
+    }
+    const s_Opcode* opcode = static_cast<const s_Opcode*>(opcode_obj);
+    return (*opcode->cppobj);
 }
-} // end of unnamed namespace
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/opcode_python.h b/src/lib/dns/python/opcode_python.h
new file mode 100644
index 0000000..d0aec15
--- /dev/null
+++ b/src/lib/dns/python/opcode_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_OPCODE_H
+#define __PYTHON_OPCODE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Opcode;
+
+namespace python {
+
+extern PyTypeObject opcode_type;
+
+/// This is a simple shortcut to create a python Opcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createOpcodeObject(const Opcode& source);
+
+/// \brief Checks if the given python object is a Opcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Opcode, false otherwise
+bool PyOpcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Opcode object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Opcode; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyOpcode_Check()
+///
+/// \note This is not a copy; if the Opcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param opcode_obj The opcode object to convert
+const Opcode& PyOpcode_ToOpcode(const PyObject* opcode_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_OPCODE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 07abf71..212141c 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -21,63 +21,720 @@
 // name initModulePart_<name>, and return true/false instead of
 // NULL/*mod
 //
-// And of course care has to be taken that all identifiers be unique
+// The big init function is split up into a separate initModulePart function
+// for each class we add.
 
 #define PY_SSIZE_T_CLEAN
 #include <Python.h>
 #include <structmember.h>
 
-#include <config.h>
-
-#include <exceptions/exceptions.h>
-
-#include <util/buffer.h>
-
-#include <dns/exceptions.h>
-#include <dns/name.h>
-#include <dns/messagerenderer.h>
+#include <dns/message.h>
+#include <dns/opcode.h>
+#include <dns/tsig.h>
+#include <util/python/pycppwrapper_util.h>
 
 #include "pydnspp_common.h"
+
+#include "edns_python.h"
+#include "message_python.h"
 #include "messagerenderer_python.h"
 #include "name_python.h"
+#include "opcode_python.h"
+#include "pydnspp_common.h"
+#include "pydnspp_towire.h"
+#include "question_python.h"
 #include "rcode_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrset_python.h"
+#include "rrttl_python.h"
+#include "rrtype_python.h"
+#include "serial_python.h"
+#include "tsigerror_python.h"
 #include "tsigkey_python.h"
+#include "tsig_python.h"
 #include "tsig_rdata_python.h"
-#include "tsigerror_python.h"
 #include "tsigrecord_python.h"
-#include "tsig_python.h"
 
-namespace isc {
-namespace dns {
-namespace python {
-// For our 'general' isc::Exceptions
-PyObject* po_IscException;
-PyObject* po_InvalidParameter;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util::python;
+
+namespace {
+
+bool
+initModulePart_EDNS(PyObject* mod) {
+    // We initialize the static description object with PyType_Ready(),
+    // then add it to the module. This is not just a check! (leaving
+    // this out results in segmentation faults)
+    //
+    // After the type has been initialized, we initialize any exceptions
+    // that are defined in the wrapper for this class, and add constants
+    // to the type, if any
+
+    if (PyType_Ready(&edns_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&edns_type);
+    void* p = &edns_type;
+    PyModule_AddObject(mod, "EDNS", static_cast<PyObject*>(p));
+
+    addClassVariable(edns_type, "SUPPORTED_VERSION",
+                     Py_BuildValue("B", EDNS::SUPPORTED_VERSION));
 
-// For our own isc::dns::Exception
-PyObject* po_DNSMessageBADVERS;
+    return (true);
 }
+
+bool
+initModulePart_Message(PyObject* mod) {
+    if (PyType_Ready(&message_type) < 0) {
+        return (false);
+    }
+    void* p = &message_type;
+    if (PyModule_AddObject(mod, "Message", static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&message_type);
+
+    try {
+        //
+        // Constant class variables
+        //
+
+        // Parse mode
+        installClassVariable(message_type, "PARSE",
+                             Py_BuildValue("I", Message::PARSE));
+        installClassVariable(message_type, "RENDER",
+                             Py_BuildValue("I", Message::RENDER));
+
+        // Parse options
+        installClassVariable(message_type, "PARSE_DEFAULT",
+                             Py_BuildValue("I", Message::PARSE_DEFAULT));
+        installClassVariable(message_type, "PRESERVE_ORDER",
+                             Py_BuildValue("I", Message::PRESERVE_ORDER));
+
+        // Header flags
+        installClassVariable(message_type, "HEADERFLAG_QR",
+                             Py_BuildValue("I", Message::HEADERFLAG_QR));
+        installClassVariable(message_type, "HEADERFLAG_AA",
+                             Py_BuildValue("I", Message::HEADERFLAG_AA));
+        installClassVariable(message_type, "HEADERFLAG_TC",
+                             Py_BuildValue("I", Message::HEADERFLAG_TC));
+        installClassVariable(message_type, "HEADERFLAG_RD",
+                             Py_BuildValue("I", Message::HEADERFLAG_RD));
+        installClassVariable(message_type, "HEADERFLAG_RA",
+                             Py_BuildValue("I", Message::HEADERFLAG_RA));
+        installClassVariable(message_type, "HEADERFLAG_AD",
+                             Py_BuildValue("I", Message::HEADERFLAG_AD));
+        installClassVariable(message_type, "HEADERFLAG_CD",
+                             Py_BuildValue("I", Message::HEADERFLAG_CD));
+
+        // Sections
+        installClassVariable(message_type, "SECTION_QUESTION",
+                             Py_BuildValue("I", Message::SECTION_QUESTION));
+        installClassVariable(message_type, "SECTION_ANSWER",
+                             Py_BuildValue("I", Message::SECTION_ANSWER));
+        installClassVariable(message_type, "SECTION_AUTHORITY",
+                             Py_BuildValue("I", Message::SECTION_AUTHORITY));
+        installClassVariable(message_type, "SECTION_ADDITIONAL",
+                             Py_BuildValue("I", Message::SECTION_ADDITIONAL));
+
+        // Protocol constant
+        installClassVariable(message_type, "DEFAULT_MAX_UDPSIZE",
+                             Py_BuildValue("I", Message::DEFAULT_MAX_UDPSIZE));
+
+        /* Class-specific exceptions */
+        po_MessageTooShort =
+            PyErr_NewException("pydnspp.MessageTooShort", NULL, NULL);
+        PyObjectContainer(po_MessageTooShort).installToModule(
+            mod, "MessageTooShort");
+        po_InvalidMessageSection =
+            PyErr_NewException("pydnspp.InvalidMessageSection", NULL, NULL);
+        PyObjectContainer(po_InvalidMessageSection).installToModule(
+            mod, "InvalidMessageSection");
+        po_InvalidMessageOperation =
+            PyErr_NewException("pydnspp.InvalidMessageOperation", NULL, NULL);
+        PyObjectContainer(po_InvalidMessageOperation).installToModule(
+            mod, "InvalidMessageOperation");
+        po_InvalidMessageUDPSize =
+            PyErr_NewException("pydnspp.InvalidMessageUDPSize", NULL, NULL);
+        PyObjectContainer(po_InvalidMessageUDPSize).installToModule(
+            mod, "InvalidMessageUDPSize");
+        po_DNSMessageBADVERS =
+            PyErr_NewException("pydnspp.DNSMessageBADVERS", NULL, NULL);
+        PyObjectContainer(po_DNSMessageBADVERS).installToModule(
+            mod, "DNSMessageBADVERS");
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in Message initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in Message initialization");
+        return (false);
+    }
+
+    return (true);
 }
+
+bool
+initModulePart_MessageRenderer(PyObject* mod) {
+    if (PyType_Ready(&messagerenderer_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&messagerenderer_type);
+
+    addClassVariable(messagerenderer_type, "CASE_INSENSITIVE",
+                     Py_BuildValue("I", MessageRenderer::CASE_INSENSITIVE));
+    addClassVariable(messagerenderer_type, "CASE_SENSITIVE",
+                     Py_BuildValue("I", MessageRenderer::CASE_SENSITIVE));
+
+    PyModule_AddObject(mod, "MessageRenderer",
+                       reinterpret_cast<PyObject*>(&messagerenderer_type));
+
+    return (true);
 }
 
-// order is important here!
-using namespace isc::dns::python;
+bool
+initModulePart_Name(PyObject* mod) {
 
-#include <dns/python/rrclass_python.cc>        // needs Messagerenderer
-#include <dns/python/rrtype_python.cc>         // needs Messagerenderer
-#include <dns/python/rrttl_python.cc>          // needs Messagerenderer
-#include <dns/python/rdata_python.cc>          // needs Type, Class
-#include <dns/python/rrset_python.cc>          // needs Rdata, RRTTL
-#include <dns/python/question_python.cc>       // needs RRClass, RRType, RRTTL,
-                                               // Name
-#include <dns/python/opcode_python.cc>
-#include <dns/python/edns_python.cc>           // needs Messagerenderer, Rcode
-#include <dns/python/message_python.cc>        // needs RRset, Question
+    //
+    // NameComparisonResult
+    //
+    if (PyType_Ready(&name_comparison_result_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&name_comparison_result_type);
+
+    // Add the enums to the module
+    po_NameRelation = Py_BuildValue("{i:s,i:s,i:s,i:s}",
+        NameComparisonResult::SUPERDOMAIN, "SUPERDOMAIN",
+        NameComparisonResult::SUBDOMAIN, "SUBDOMAIN",
+        NameComparisonResult::EQUAL, "EQUAL",
+        NameComparisonResult::COMMONANCESTOR, "COMMONANCESTOR");
+    addClassVariable(name_comparison_result_type, "NameRelation",
+                     po_NameRelation);
+
+    PyModule_AddObject(mod, "NameComparisonResult",
+        reinterpret_cast<PyObject*>(&name_comparison_result_type));
+
+    //
+    // Name
+    //
+
+    if (PyType_Ready(&name_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&name_type);
+
+    // Add the constants to the module
+    addClassVariable(name_type, "MAX_WIRE",
+                     Py_BuildValue("I", Name::MAX_WIRE));
+    addClassVariable(name_type, "MAX_LABELS",
+                     Py_BuildValue("I", Name::MAX_LABELS));
+    addClassVariable(name_type, "MAX_LABELLEN",
+                     Py_BuildValue("I", Name::MAX_LABELLEN));
+    addClassVariable(name_type, "MAX_COMPRESS_POINTER",
+                     Py_BuildValue("I", Name::MAX_COMPRESS_POINTER));
+    addClassVariable(name_type, "COMPRESS_POINTER_MARK8",
+                     Py_BuildValue("I", Name::COMPRESS_POINTER_MARK8));
+    addClassVariable(name_type, "COMPRESS_POINTER_MARK16",
+                     Py_BuildValue("I", Name::COMPRESS_POINTER_MARK16));
+
+    addClassVariable(name_type, "ROOT_NAME",
+                     createNameObject(Name::ROOT_NAME()));
+
+    PyModule_AddObject(mod, "Name",
+                       reinterpret_cast<PyObject*>(&name_type));
+
+
+    // Add the exceptions to the module
+    po_EmptyLabel = PyErr_NewException("pydnspp.EmptyLabel", NULL, NULL);
+    PyModule_AddObject(mod, "EmptyLabel", po_EmptyLabel);
+
+    po_TooLongName = PyErr_NewException("pydnspp.TooLongName", NULL, NULL);
+    PyModule_AddObject(mod, "TooLongName", po_TooLongName);
+
+    po_TooLongLabel = PyErr_NewException("pydnspp.TooLongLabel", NULL, NULL);
+    PyModule_AddObject(mod, "TooLongLabel", po_TooLongLabel);
+
+    po_BadLabelType = PyErr_NewException("pydnspp.BadLabelType", NULL, NULL);
+    PyModule_AddObject(mod, "BadLabelType", po_BadLabelType);
+
+    po_BadEscape = PyErr_NewException("pydnspp.BadEscape", NULL, NULL);
+    PyModule_AddObject(mod, "BadEscape", po_BadEscape);
+
+    po_IncompleteName = PyErr_NewException("pydnspp.IncompleteName", NULL, NULL);
+    PyModule_AddObject(mod, "IncompleteName", po_IncompleteName);
+
+    po_InvalidBufferPosition =
+        PyErr_NewException("pydnspp.InvalidBufferPosition", NULL, NULL);
+    PyModule_AddObject(mod, "InvalidBufferPosition", po_InvalidBufferPosition);
+
+    // This one could have gone into the message_python.cc file, but is
+    // already needed here.
+    po_DNSMessageFORMERR = PyErr_NewException("pydnspp.DNSMessageFORMERR",
+                                              NULL, NULL);
+    PyModule_AddObject(mod, "DNSMessageFORMERR", po_DNSMessageFORMERR);
+
+    return (true);
+}
+
+bool
+initModulePart_Opcode(PyObject* mod) {
+    if (PyType_Ready(&opcode_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&opcode_type);
+    void* p = &opcode_type;
+    if (PyModule_AddObject(mod, "Opcode", static_cast<PyObject*>(p)) != 0) {
+        Py_DECREF(&opcode_type);
+        return (false);
+    }
+
+    addClassVariable(opcode_type, "QUERY_CODE",
+                     Py_BuildValue("h", Opcode::QUERY_CODE));
+    addClassVariable(opcode_type, "IQUERY_CODE",
+                     Py_BuildValue("h", Opcode::IQUERY_CODE));
+    addClassVariable(opcode_type, "STATUS_CODE",
+                     Py_BuildValue("h", Opcode::STATUS_CODE));
+    addClassVariable(opcode_type, "RESERVED3_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED3_CODE));
+    addClassVariable(opcode_type, "NOTIFY_CODE",
+                     Py_BuildValue("h", Opcode::NOTIFY_CODE));
+    addClassVariable(opcode_type, "UPDATE_CODE",
+                     Py_BuildValue("h", Opcode::UPDATE_CODE));
+    addClassVariable(opcode_type, "RESERVED6_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED6_CODE));
+    addClassVariable(opcode_type, "RESERVED7_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED7_CODE));
+    addClassVariable(opcode_type, "RESERVED8_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED8_CODE));
+    addClassVariable(opcode_type, "RESERVED9_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED9_CODE));
+    addClassVariable(opcode_type, "RESERVED10_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED10_CODE));
+    addClassVariable(opcode_type, "RESERVED11_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED11_CODE));
+    addClassVariable(opcode_type, "RESERVED12_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED12_CODE));
+    addClassVariable(opcode_type, "RESERVED13_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED13_CODE));
+    addClassVariable(opcode_type, "RESERVED14_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED14_CODE));
+    addClassVariable(opcode_type, "RESERVED15_CODE",
+                     Py_BuildValue("h", Opcode::RESERVED15_CODE));
+
+    return (true);
+}
+
+bool
+initModulePart_Question(PyObject* mod) {
+    if (PyType_Ready(&question_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&question_type);
+    PyModule_AddObject(mod, "Question",
+                       reinterpret_cast<PyObject*>(&question_type));
+
+    return (true);
+}
+
+bool
+initModulePart_Rcode(PyObject* mod) {
+    if (PyType_Ready(&rcode_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rcode_type);
+    void* p = &rcode_type;
+    if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
+        Py_DECREF(&rcode_type);
+        return (false);
+    }
+
+    addClassVariable(rcode_type, "NOERROR_CODE",
+                     Py_BuildValue("h", Rcode::NOERROR_CODE));
+    addClassVariable(rcode_type, "FORMERR_CODE",
+                     Py_BuildValue("h", Rcode::FORMERR_CODE));
+    addClassVariable(rcode_type, "SERVFAIL_CODE",
+                     Py_BuildValue("h", Rcode::SERVFAIL_CODE));
+    addClassVariable(rcode_type, "NXDOMAIN_CODE",
+                     Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
+    addClassVariable(rcode_type, "NOTIMP_CODE",
+                     Py_BuildValue("h", Rcode::NOTIMP_CODE));
+    addClassVariable(rcode_type, "REFUSED_CODE",
+                     Py_BuildValue("h", Rcode::REFUSED_CODE));
+    addClassVariable(rcode_type, "YXDOMAIN_CODE",
+                     Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
+    addClassVariable(rcode_type, "YXRRSET_CODE",
+                     Py_BuildValue("h", Rcode::YXRRSET_CODE));
+    addClassVariable(rcode_type, "NXRRSET_CODE",
+                     Py_BuildValue("h", Rcode::NXRRSET_CODE));
+    addClassVariable(rcode_type, "NOTAUTH_CODE",
+                     Py_BuildValue("h", Rcode::NOTAUTH_CODE));
+    addClassVariable(rcode_type, "NOTZONE_CODE",
+                     Py_BuildValue("h", Rcode::NOTZONE_CODE));
+    addClassVariable(rcode_type, "RESERVED11_CODE",
+                     Py_BuildValue("h", Rcode::RESERVED11_CODE));
+    addClassVariable(rcode_type, "RESERVED12_CODE",
+                     Py_BuildValue("h", Rcode::RESERVED12_CODE));
+    addClassVariable(rcode_type, "RESERVED13_CODE",
+                     Py_BuildValue("h", Rcode::RESERVED13_CODE));
+    addClassVariable(rcode_type, "RESERVED14_CODE",
+                     Py_BuildValue("h", Rcode::RESERVED14_CODE));
+    addClassVariable(rcode_type, "RESERVED15_CODE",
+                     Py_BuildValue("h", Rcode::RESERVED15_CODE));
+    addClassVariable(rcode_type, "BADVERS_CODE",
+                     Py_BuildValue("h", Rcode::BADVERS_CODE));
+
+    return (true);
+}
+
+bool
+initModulePart_Rdata(PyObject* mod) {
+    if (PyType_Ready(&rdata_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rdata_type);
+    PyModule_AddObject(mod, "Rdata",
+                       reinterpret_cast<PyObject*>(&rdata_type));
+
+    // Add the exceptions to the module
+    po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength",
+                                               NULL, NULL);
+    PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
+
+    po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText",
+                                             NULL, NULL);
+    PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
+
+    po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong",
+                                              NULL, NULL);
+    PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
+
+
+    return (true);
+}
+
+bool
+initModulePart_RRClass(PyObject* mod) {
+    po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass",
+                                           NULL, NULL);
+    Py_INCREF(po_InvalidRRClass);
+    PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
+    po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass",
+                                              NULL, NULL);
+    Py_INCREF(po_IncompleteRRClass);
+    PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
+
+    if (PyType_Ready(&rrclass_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rrclass_type);
+    PyModule_AddObject(mod, "RRClass",
+                       reinterpret_cast<PyObject*>(&rrclass_type));
+
+    return (true);
+}
+
+bool
+initModulePart_RRset(PyObject* mod) {
+    po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
+    PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+
+    // RRset
+    if (PyType_Ready(&rrset_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rrset_type);
+    PyModule_AddObject(mod, "RRset",
+                       reinterpret_cast<PyObject*>(&rrset_type));
+
+    return (true);
+}
+
+bool
+initModulePart_RRTTL(PyObject* mod) {
+    po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
+    PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
+    po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL",
+                                            NULL, NULL);
+    PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+
+    if (PyType_Ready(&rrttl_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rrttl_type);
+    PyModule_AddObject(mod, "RRTTL",
+                       reinterpret_cast<PyObject*>(&rrttl_type));
+
+    return (true);
+}
+
+bool
+initModulePart_RRType(PyObject* mod) {
+    // Add the exceptions to the module
+    po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
+    PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
+    po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType",
+                                             NULL, NULL);
+    PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
+
+    if (PyType_Ready(&rrtype_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&rrtype_type);
+    PyModule_AddObject(mod, "RRType",
+                       reinterpret_cast<PyObject*>(&rrtype_type));
+
+    return (true);
+}
+
+bool
+initModulePart_Serial(PyObject* mod) {
+    if (PyType_Ready(&serial_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&serial_type);
+    PyModule_AddObject(mod, "Serial",
+                       reinterpret_cast<PyObject*>(&serial_type));
+
+    return (true);
+}
+
+bool
+initModulePart_TSIGError(PyObject* mod) {
+    if (PyType_Ready(&tsigerror_type) < 0) {
+        return (false);
+    }
+    void* p = &tsigerror_type;
+    if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&tsigerror_type);
+
+    try {
+        // Constant class variables
+        // Error codes (bare values)
+        installClassVariable(tsigerror_type, "BAD_SIG_CODE",
+                             Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
+        installClassVariable(tsigerror_type, "BAD_KEY_CODE",
+                             Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
+        installClassVariable(tsigerror_type, "BAD_TIME_CODE",
+                             Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
+
+        // Error codes (constant objects)
+        installClassVariable(tsigerror_type, "NOERROR",
+                             createTSIGErrorObject(TSIGError::NOERROR()));
+        installClassVariable(tsigerror_type, "FORMERR",
+                             createTSIGErrorObject(TSIGError::FORMERR()));
+        installClassVariable(tsigerror_type, "SERVFAIL",
+                             createTSIGErrorObject(TSIGError::SERVFAIL()));
+        installClassVariable(tsigerror_type, "NXDOMAIN",
+                             createTSIGErrorObject(TSIGError::NXDOMAIN()));
+        installClassVariable(tsigerror_type, "NOTIMP",
+                             createTSIGErrorObject(TSIGError::NOTIMP()));
+        installClassVariable(tsigerror_type, "REFUSED",
+                             createTSIGErrorObject(TSIGError::REFUSED()));
+        installClassVariable(tsigerror_type, "YXDOMAIN",
+                             createTSIGErrorObject(TSIGError::YXDOMAIN()));
+        installClassVariable(tsigerror_type, "YXRRSET",
+                             createTSIGErrorObject(TSIGError::YXRRSET()));
+        installClassVariable(tsigerror_type, "NXRRSET",
+                             createTSIGErrorObject(TSIGError::NXRRSET()));
+        installClassVariable(tsigerror_type, "NOTAUTH",
+                             createTSIGErrorObject(TSIGError::NOTAUTH()));
+        installClassVariable(tsigerror_type, "NOTZONE",
+                             createTSIGErrorObject(TSIGError::NOTZONE()));
+        installClassVariable(tsigerror_type, "RESERVED11",
+                             createTSIGErrorObject(TSIGError::RESERVED11()));
+        installClassVariable(tsigerror_type, "RESERVED12",
+                             createTSIGErrorObject(TSIGError::RESERVED12()));
+        installClassVariable(tsigerror_type, "RESERVED13",
+                             createTSIGErrorObject(TSIGError::RESERVED13()));
+        installClassVariable(tsigerror_type, "RESERVED14",
+                             createTSIGErrorObject(TSIGError::RESERVED14()));
+        installClassVariable(tsigerror_type, "RESERVED15",
+                             createTSIGErrorObject(TSIGError::RESERVED15()));
+        installClassVariable(tsigerror_type, "BAD_SIG",
+                             createTSIGErrorObject(TSIGError::BAD_SIG()));
+        installClassVariable(tsigerror_type, "BAD_KEY",
+                             createTSIGErrorObject(TSIGError::BAD_KEY()));
+        installClassVariable(tsigerror_type, "BAD_TIME",
+                             createTSIGErrorObject(TSIGError::BAD_TIME()));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in TSIGError initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in TSIGError initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+bool
+initModulePart_TSIGKey(PyObject* mod) {
+    if (PyType_Ready(&tsigkey_type) < 0) {
+        return (false);
+    }
+    void* p = &tsigkey_type;
+    if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
+        return (false);
+    }
+    Py_INCREF(&tsigkey_type);
+
+    try {
+        // Constant class variables
+        installClassVariable(tsigkey_type, "HMACMD5_NAME",
+                             createNameObject(TSIGKey::HMACMD5_NAME()));
+        installClassVariable(tsigkey_type, "HMACSHA1_NAME",
+                             createNameObject(TSIGKey::HMACSHA1_NAME()));
+        installClassVariable(tsigkey_type, "HMACSHA256_NAME",
+                             createNameObject(TSIGKey::HMACSHA256_NAME()));
+        installClassVariable(tsigkey_type, "HMACSHA224_NAME",
+                             createNameObject(TSIGKey::HMACSHA224_NAME()));
+        installClassVariable(tsigkey_type, "HMACSHA384_NAME",
+                             createNameObject(TSIGKey::HMACSHA384_NAME()));
+        installClassVariable(tsigkey_type, "HMACSHA512_NAME",
+                             createNameObject(TSIGKey::HMACSHA512_NAME()));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in TSIGKey initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in TSIGKey initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+bool
+initModulePart_TSIGKeyRing(PyObject* mod) {
+    if (PyType_Ready(&tsigkeyring_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&tsigkeyring_type);
+    void* p = &tsigkeyring_type;
+    if (PyModule_AddObject(mod, "TSIGKeyRing",
+                           static_cast<PyObject*>(p)) != 0) {
+        Py_DECREF(&tsigkeyring_type);
+        return (false);
+    }
+
+    addClassVariable(tsigkeyring_type, "SUCCESS",
+                     Py_BuildValue("I", TSIGKeyRing::SUCCESS));
+    addClassVariable(tsigkeyring_type, "EXIST",
+                     Py_BuildValue("I", TSIGKeyRing::EXIST));
+    addClassVariable(tsigkeyring_type, "NOTFOUND",
+                     Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+
+    return (true);
+}
+
+bool
+initModulePart_TSIGContext(PyObject* mod) {
+    if (PyType_Ready(&tsigcontext_type) < 0) {
+        return (false);
+    }
+    void* p = &tsigcontext_type;
+    if (PyModule_AddObject(mod, "TSIGContext",
+                           static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&tsigcontext_type);
+
+    try {
+        // Class specific exceptions
+        po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
+                                                 po_IscException, NULL);
+        PyObjectContainer(po_TSIGContextError).installToModule(
+            mod, "TSIGContextError");
+
+        // Constant class variables
+        installClassVariable(tsigcontext_type, "STATE_INIT",
+                             Py_BuildValue("I", TSIGContext::INIT));
+        installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
+                             Py_BuildValue("I", TSIGContext::SENT_REQUEST));
+        installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
+                             Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
+        installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
+                             Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
+        installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
+                             Py_BuildValue("I",
+                                           TSIGContext::VERIFIED_RESPONSE));
+
+        installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
+                             Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in TSIGContext initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in TSIGContext initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+bool
+initModulePart_TSIG(PyObject* mod) {
+    if (PyType_Ready(&tsig_type) < 0) {
+        return (false);
+    }
+    void* p = &tsig_type;
+    if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&tsig_type);
+
+    return (true);
+}
+
+bool
+initModulePart_TSIGRecord(PyObject* mod) {
+    if (PyType_Ready(&tsigrecord_type) < 0) {
+        return (false);
+    }
+    void* p = &tsigrecord_type;
+    if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&tsigrecord_type);
+
+    try {
+        // Constant class variables
+        installClassVariable(tsigrecord_type, "TSIG_TTL",
+                             Py_BuildValue("I", 0));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in TSIGRecord initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in TSIGRecord initialization");
+        return (false);
+    }
+
+    return (true);
+}
 
-//
-// Definition of the module
-//
-namespace {
 PyModuleDef pydnspp = {
     { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
     "pydnspp",
@@ -160,6 +817,10 @@ PyInit_pydnspp(void) {
         return (NULL);
     }
 
+    if (!initModulePart_Serial(mod)) {
+        return (NULL);
+    }
+
     if (!initModulePart_TSIGKey(mod)) {
         return (NULL);
     }
diff --git a/src/lib/dns/python/pydnspp_common.cc b/src/lib/dns/python/pydnspp_common.cc
index 8ca763a..0f0f873 100644
--- a/src/lib/dns/python/pydnspp_common.cc
+++ b/src/lib/dns/python/pydnspp_common.cc
@@ -15,9 +15,45 @@
 #include <Python.h>
 #include <pydnspp_common.h>
 
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+
+#include <dns/exceptions.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+#include "rdata_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rrset_python.h"
+#include "rcode_python.h"
+#include "opcode_python.h"
+#include "tsigkey_python.h"
+#include "tsig_rdata_python.h"
+#include "tsigerror_python.h"
+#include "tsigrecord_python.h"
+#include "tsig_python.h"
+#include "question_python.h"
+#include "message_python.h"
+
+using namespace isc::dns::python;
+
 namespace isc {
 namespace dns {
 namespace python {
+// For our 'general' isc::Exceptions
+PyObject* po_IscException;
+PyObject* po_InvalidParameter;
+
+// For our own isc::dns::Exception
+PyObject* po_DNSMessageBADVERS;
+
+
 int
 readDataFromSequence(uint8_t *data, size_t len, PyObject* sequence) {
     PyObject* el = NULL;
diff --git a/src/lib/dns/python/pydnspp_common.h b/src/lib/dns/python/pydnspp_common.h
index ed90998..8092b08 100644
--- a/src/lib/dns/python/pydnspp_common.h
+++ b/src/lib/dns/python/pydnspp_common.h
@@ -20,8 +20,6 @@
 #include <stdexcept>
 #include <string>
 
-#include <util/python/pycppwrapper_util.h>
-
 namespace isc {
 namespace dns {
 namespace python {
diff --git a/src/lib/dns/python/pydnspp_towire.h b/src/lib/dns/python/pydnspp_towire.h
index 66362a0..e987a29 100644
--- a/src/lib/dns/python/pydnspp_towire.h
+++ b/src/lib/dns/python/pydnspp_towire.h
@@ -93,10 +93,10 @@ toWireWrapper(const PYSTRUCT* const self, PyObject* args) {
         }
 
         // To MessageRenderer version
-        s_MessageRenderer* renderer;
+        PyObject* renderer;
         if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &renderer)) {
             const unsigned int n = TOWIRECALLER(*self->cppobj)(
-                *renderer->messagerenderer);
+                PyMessageRenderer_ToMessageRenderer(renderer));
 
             return (Py_BuildValue("I", n));
         }
diff --git a/src/lib/dns/python/question_python.cc b/src/lib/dns/python/question_python.cc
index c702f85..44d68a2 100644
--- a/src/lib/dns/python/question_python.cc
+++ b/src/lib/dns/python/question_python.cc
@@ -12,25 +12,34 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
 #include <dns/question.h>
+#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "pydnspp_common.h"
+#include "question_python.h"
+#include "name_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
 using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+using namespace isc;
 
-//
-// Question
-//
-
-// The s_* Class simply coverst one instantiation of the object
+namespace {
 class s_Question : public PyObject {
 public:
-    QuestionPtr question;
+    isc::dns::QuestionPtr cppobj;
 };
 
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
 static int Question_init(s_Question* self, PyObject* args);
 static void Question_destroy(s_Question* self);
 
@@ -69,60 +78,6 @@ static PyMethodDef Question_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_Question
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject question_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.Question",
-    sizeof(s_Question),                 // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)Question_destroy,       // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    Question_str,                       // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The Question class encapsulates the common search key of DNS"
-    "lookup, consisting of owner name, RR type and RR class.",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    NULL,                               // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    Question_methods,                   // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)Question_init,            // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
 static int
 Question_init(s_Question* self, PyObject* args) {
     // Try out the various combinations of arguments to call the
@@ -131,9 +86,9 @@ Question_init(s_Question* self, PyObject* args) {
     // that if we try several like here. Otherwise the *next* python
     // call will suddenly appear to throw an exception.
     // (the way to do exceptions is to set PyErr and return -1)
-    s_Name* name;
-    s_RRClass* rrclass;
-    s_RRType* rrtype;
+    PyObject* name;
+    PyObject* rrclass;
+    PyObject* rrtype;
 
     const char* b;
     Py_ssize_t len;
@@ -141,17 +96,18 @@ Question_init(s_Question* self, PyObject* args) {
 
     try {
         if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &name,
-                                               &rrclass_type, &rrclass,
-                                               &rrtype_type, &rrtype
+                                             &rrclass_type, &rrclass,
+                                             &rrtype_type, &rrtype
            )) {
-            self->question = QuestionPtr(new Question(*name->cppobj, *rrclass->rrclass,
-                                          *rrtype->rrtype));
+            self->cppobj = QuestionPtr(new Question(PyName_ToName(name),
+                                                    PyRRClass_ToRRClass(rrclass),
+                                                    PyRRType_ToRRType(rrtype)));
             return (0);
         } else if (PyArg_ParseTuple(args, "y#|I", &b, &len, &position)) {
             PyErr_Clear();
             InputBuffer inbuf(b, len);
             inbuf.setPosition(position);
-            self->question = QuestionPtr(new Question(inbuf));
+            self->cppobj = QuestionPtr(new Question(inbuf));
             return (0);
         }
     } catch (const DNSMessageFORMERR& dmfe) {
@@ -168,7 +124,7 @@ Question_init(s_Question* self, PyObject* args) {
         return (-1);
     }
 
-    self->question = QuestionPtr();
+    self->cppobj = QuestionPtr();
 
     PyErr_Clear();
     PyErr_SetString(PyExc_TypeError,
@@ -178,52 +134,62 @@ Question_init(s_Question* self, PyObject* args) {
 
 static void
 Question_destroy(s_Question* self) {
-    self->question.reset();
+    self->cppobj.reset();
     Py_TYPE(self)->tp_free(self);
 }
 
 static PyObject*
 Question_getName(s_Question* self) {
-    s_Name* name;
-
-    // is this the best way to do this?
-    name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
-    if (name != NULL) {
-        name->cppobj = new Name(self->question->getName());
+    try {
+        return (createNameObject(self->cppobj->getName()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question Name: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question Name");
     }
-
-    return (name);
+    return (NULL);
 }
 
 static PyObject*
 Question_getType(s_Question* self) {
-    s_RRType* rrtype;
-
-    rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
-    if (rrtype != NULL) {
-        rrtype->rrtype = new RRType(self->question->getType());
+    try {
+        return (createRRTypeObject(self->cppobj->getType()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question RRType: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question RRType");
     }
-
-    return (rrtype);
+    return (NULL);
 }
 
 static PyObject*
 Question_getClass(s_Question* self) {
-    s_RRClass* rrclass;
-
-    rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
-    if (rrclass != NULL) {
-        rrclass->rrclass = new RRClass(self->question->getClass());
+    try {
+        return (createRRClassObject(self->cppobj->getClass()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question RRClass: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question RRClass");
     }
-
-    return (rrclass);
+    return (NULL);
 }
 
-
 static PyObject*
 Question_toText(s_Question* self) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->question->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
 static PyObject*
@@ -237,14 +203,14 @@ Question_str(PyObject* self) {
 static PyObject*
 Question_toWire(s_Question* self, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
-    
+    PyObject* mr;
+
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
 
         // Max length is Name::MAX_WIRE + rrclass (2) + rrtype (2)
         OutputBuffer buffer(Name::MAX_WIRE + 4);
-        self->question->toWire(buffer);
+        self->cppobj->toWire(buffer);
         PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
                                                 buffer.getLength());
         PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -253,7 +219,7 @@ Question_toWire(s_Question* self, PyObject* args) {
         Py_DECREF(n);
         return (result);
     } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->question->toWire(*mr->messagerenderer);
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
         // If we return NULL it is seen as an error, so use this for
         // None returns
         Py_RETURN_NONE;
@@ -264,23 +230,92 @@ Question_toWire(s_Question* self, PyObject* args) {
     return (NULL);
 }
 
-// end of Question
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Question
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject question_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Question",
+    sizeof(s_Question),                 // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Question_destroy,       // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Question_str,                       // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Question class encapsulates the common search key of DNS "
+    "lookup, consisting of owner name, RR type and RR class.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Question_methods,                   // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Question_init,            // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
 
+PyObject*
+createQuestionObject(const Question& source) {
+    s_Question* question =
+        static_cast<s_Question*>(question_type.tp_alloc(&question_type, 0));
+    question->cppobj = QuestionPtr(new Question(source));
+    return (question);
+}
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_Question(PyObject* mod) {
-    // Add the exceptions to the module
+PyQuestion_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &question_type));
+}
 
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&question_type) < 0) {
-        return (false);
+const Question&
+PyQuestion_ToQuestion(const PyObject* question_obj) {
+    if (question_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Question PyObject conversion");
     }
-    Py_INCREF(&question_type);
-    PyModule_AddObject(mod, "Question",
-                       reinterpret_cast<PyObject*>(&question_type));
-    
-    return (true);
+    const s_Question* question = static_cast<const s_Question*>(question_obj);
+    return (*question->cppobj);
 }
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/question_python.h b/src/lib/dns/python/question_python.h
new file mode 100644
index 0000000..f5d78b1
--- /dev/null
+++ b/src/lib/dns/python/question_python.h
@@ -0,0 +1,66 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_QUESTION_H
+#define __PYTHON_QUESTION_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Question;
+
+namespace python {
+
+extern PyObject* po_EmptyQuestion;
+
+extern PyTypeObject question_type;
+
+/// This is a simple shortcut to create a python Question object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createQuestionObject(const Question& source);
+
+/// \brief Checks if the given python object is a Question object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Question, false otherwise
+bool PyQuestion_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Question object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Question; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyQuestion_Check()
+///
+/// \note This is not a copy; if the Question is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param question_obj The question object to convert
+const Question& PyQuestion_ToQuestion(const PyObject* question_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_QUESTION_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rcode_python.cc b/src/lib/dns/python/rcode_python.cc
index b594ad3..42b48e7 100644
--- a/src/lib/dns/python/rcode_python.cc
+++ b/src/lib/dns/python/rcode_python.cc
@@ -15,34 +15,39 @@
 #include <Python.h>
 
 #include <exceptions/exceptions.h>
-
 #include <dns/rcode.h>
+#include <util/python/pycppwrapper_util.h>
 
 #include "pydnspp_common.h"
 #include "rcode_python.h"
 
 using namespace isc::dns;
 using namespace isc::dns::python;
+using namespace isc::util::python;
 
+namespace {
+// The s_* Class simply covers one instantiation of the object.
 //
-// Declaration of the custom exceptions (None for this class)
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// Rcode
+// We added a helper variable static_code here
+// Since we can create Rcodes dynamically with Rcode(int), but also
+// use the static globals (Rcode::NOERROR() etc), we use this
+// variable to see if the code came from one of the latter, in which
+// case Rcode_destroy should not free it (the other option is to
+// allocate new Rcodes for every use of the static ones, but this
+// seems more efficient).
 //
+// Follow-up note: we don't have to use the proxy function in the python lib;
+// we can just define class specific constants directly (see TSIGError).
+// We should make this cleanup later.
+class s_Rcode : public PyObject {
+public:
+    s_Rcode() : cppobj(NULL), static_code(false) {};
+    const Rcode* cppobj;
+    bool static_code;
+};
 
-// Trivial constructor.
-s_Rcode::s_Rcode() : cppobj(NULL), static_code(false) {}
+typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodeContainer;
 
-namespace {
 int Rcode_init(s_Rcode* const self, PyObject* args);
 void Rcode_destroy(s_Rcode* const self);
 
@@ -282,7 +287,7 @@ Rcode_BADVERS(const s_Rcode*) {
     return (Rcode_createStatic(Rcode::BADVERS()));
 }
 
-PyObject* 
+PyObject*
 Rcode_richcmp(const s_Rcode* const self, const s_Rcode* const other,
               const int op)
 {
@@ -376,59 +381,31 @@ PyTypeObject rcode_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
+PyObject*
+createRcodeObject(const Rcode& source) {
+    RcodeContainer container(PyObject_New(s_Rcode, &rcode_type));
+    container.set(new Rcode(source));
+    return (container.release());
+}
+
 bool
-initModulePart_Rcode(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&rcode_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&rcode_type);
-    void* p = &rcode_type;
-    if (PyModule_AddObject(mod, "Rcode", static_cast<PyObject*>(p)) != 0) {
-        Py_DECREF(&rcode_type);
-        return (false);
+PyRcode_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
+    return (PyObject_TypeCheck(obj, &rcode_type));
+}
 
-    addClassVariable(rcode_type, "NOERROR_CODE",
-                     Py_BuildValue("h", Rcode::NOERROR_CODE));
-    addClassVariable(rcode_type, "FORMERR_CODE",
-                     Py_BuildValue("h", Rcode::FORMERR_CODE));
-    addClassVariable(rcode_type, "SERVFAIL_CODE",
-                     Py_BuildValue("h", Rcode::SERVFAIL_CODE));
-    addClassVariable(rcode_type, "NXDOMAIN_CODE",
-                     Py_BuildValue("h", Rcode::NXDOMAIN_CODE));
-    addClassVariable(rcode_type, "NOTIMP_CODE",
-                     Py_BuildValue("h", Rcode::NOTIMP_CODE));
-    addClassVariable(rcode_type, "REFUSED_CODE",
-                     Py_BuildValue("h", Rcode::REFUSED_CODE));
-    addClassVariable(rcode_type, "YXDOMAIN_CODE",
-                     Py_BuildValue("h", Rcode::YXDOMAIN_CODE));
-    addClassVariable(rcode_type, "YXRRSET_CODE",
-                     Py_BuildValue("h", Rcode::YXRRSET_CODE));
-    addClassVariable(rcode_type, "NXRRSET_CODE",
-                     Py_BuildValue("h", Rcode::NXRRSET_CODE));
-    addClassVariable(rcode_type, "NOTAUTH_CODE",
-                     Py_BuildValue("h", Rcode::NOTAUTH_CODE));
-    addClassVariable(rcode_type, "NOTZONE_CODE",
-                     Py_BuildValue("h", Rcode::NOTZONE_CODE));
-    addClassVariable(rcode_type, "RESERVED11_CODE",
-                     Py_BuildValue("h", Rcode::RESERVED11_CODE));
-    addClassVariable(rcode_type, "RESERVED12_CODE",
-                     Py_BuildValue("h", Rcode::RESERVED12_CODE));
-    addClassVariable(rcode_type, "RESERVED13_CODE",
-                     Py_BuildValue("h", Rcode::RESERVED13_CODE));
-    addClassVariable(rcode_type, "RESERVED14_CODE",
-                     Py_BuildValue("h", Rcode::RESERVED14_CODE));
-    addClassVariable(rcode_type, "RESERVED15_CODE",
-                     Py_BuildValue("h", Rcode::RESERVED15_CODE));
-    addClassVariable(rcode_type, "BADVERS_CODE",
-                     Py_BuildValue("h", Rcode::BADVERS_CODE));
-
-    return (true);
+const Rcode&
+PyRcode_ToRcode(const PyObject* rcode_obj) {
+    if (rcode_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Rcode PyObject conversion");
+    }
+    const s_Rcode* rcode = static_cast<const s_Rcode*>(rcode_obj);
+    return (*rcode->cppobj);
 }
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/rcode_python.h b/src/lib/dns/python/rcode_python.h
index 9b5e699..a149406 100644
--- a/src/lib/dns/python/rcode_python.h
+++ b/src/lib/dns/python/rcode_python.h
@@ -23,29 +23,36 @@ class Rcode;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object.
-//
-// We added a helper variable static_code here
-// Since we can create Rcodes dynamically with Rcode(int), but also
-// use the static globals (Rcode::NOERROR() etc), we use this
-// variable to see if the code came from one of the latter, in which
-// case Rcode_destroy should not free it (the other option is to
-// allocate new Rcodes for every use of the static ones, but this
-// seems more efficient).
-//
-// Follow-up note: we don't have to use the proxy function in the python lib;
-// we can just define class specific constants directly (see TSIGError).
-// We should make this cleanup later.
-class s_Rcode : public PyObject {
-public:
-    s_Rcode();
-    const Rcode* cppobj;
-    bool static_code;
-};
-
 extern PyTypeObject rcode_type;
 
-bool initModulePart_Rcode(PyObject* mod);
+/// This is a simple shortcut to create a python Rcode object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRcodeObject(const Rcode& source);
+
+/// \brief Checks if the given python object is a Rcode object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rcode, false otherwise
+bool PyRcode_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rcode object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Rcode; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRcode_Check()
+///
+/// \note This is not a copy; if the Rcode is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rcode_obj The rcode object to convert
+const Rcode& PyRcode_ToRcode(const PyObject* rcode_obj);
 
 } // namespace python
 } // namespace dns
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index faa4f4c..e4ff890 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -12,60 +12,71 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
 #include <dns/rdata.h>
+#include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rdata_python.h"
+#include "rrtype_python.h"
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+#include "name_python.h"
+
 using namespace isc::dns;
+using namespace isc::dns::python;
 using namespace isc::util;
+using namespace isc::util::python;
 using namespace isc::dns::rdata;
 
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRdataLength;
-static PyObject* po_InvalidRdataText;
-static PyObject* po_CharStringTooLong;
-
-//
-// Definition of the classes
-//
+namespace {
 
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+typedef PyObject* method(PyObject* self, PyObject* args);
 
-//
-// Rdata
-//
-
-// The s_* Class simply coverst one instantiation of the object
+// Wrap a method into an exception handling, converting C++ exceptions
+// to python ones. The params and return value is just passed through.
+PyObject*
+exception_wrap(method* method, PyObject* self, PyObject* args) {
+    try {
+        return (method(self, args));
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or if
+        // at all, they can be triggered. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
+        return (NULL);
+    }
+}
 
-// Using a shared_ptr here should not really be necessary (PyObject
-// is already reference-counted), however internally on the cpp side,
-// not doing so might result in problems, since we can't copy construct
-// rdata field, adding them to rrsets results in a problem when the
-// rrset is destroyed later
 class s_Rdata : public PyObject {
 public:
-    RdataPtr rdata;
+    isc::dns::rdata::ConstRdataPtr cppobj;
 };
 
+typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
+
 //
 // We declare the functions here, the definitions are below
 // the type definition of the object, since both can use the other
 //
 
 // General creation and destruction
-static int Rdata_init(s_Rdata* self, PyObject* args);
-static void Rdata_destroy(s_Rdata* self);
+int Rdata_init(PyObject* self, PyObject* args, PyObject*);
+void Rdata_destroy(PyObject* self);
 
 // These are the functions we export
-static PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(PyObject* self, PyObject*);
 // This is a second version of toText, we need one where the argument
 // is a PyObject*, for the str() function in python.
-static PyObject* Rdata_str(PyObject* self);
-static PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_str(PyObject* self);
+PyObject* Rdata_toWire(PyObject* self, PyObject* args);
+PyObject* RData_richcmp(PyObject* self, PyObject* other, int op);
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -73,10 +84,10 @@ static PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
 // 2. Our static function here
 // 3. Argument type
 // 4. Documentation
-static PyMethodDef Rdata_methods[] = {
-    { "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
+PyMethodDef Rdata_methods[] = {
+    { "to_text", Rdata_toText, METH_NOARGS,
       "Returns the string representation" },
-    { "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
+    { "to_wire", Rdata_toWire, METH_VARARGS,
       "Converts the Rdata object to wire format.\n"
       "The argument can be either a MessageRenderer or an object that "
       "implements the sequence interface. If the object is mutable "
@@ -86,15 +97,205 @@ static PyMethodDef Rdata_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
+int
+Rdata_init(PyObject* self_p, PyObject* args, PyObject*) {
+    PyObject* rrtype;
+    PyObject* rrclass;
+    const char* s;
+    const char* data;
+    Py_ssize_t len;
+    s_Rdata* self(static_cast<s_Rdata*>(self_p));
+
+    try {
+        // Create from string
+        if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
+                             &rrclass_type, &rrclass,
+                             &s)) {
+            self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+                                       PyRRClass_ToRRClass(rrclass), s);
+            return (0);
+        } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
+                                    &rrclass_type, &rrclass, &data, &len)) {
+            InputBuffer input_buffer(data, len);
+            self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+                                       PyRRClass_ToRRClass(rrclass),
+                                       input_buffer, len);
+            return (0);
+        }
+    } catch (const isc::dns::rdata::InvalidRdataText& irdt) {
+        PyErr_SetString(po_InvalidRdataText, irdt.what());
+        return (-1);
+    } catch (const isc::dns::rdata::InvalidRdataLength& irdl) {
+        PyErr_SetString(po_InvalidRdataLength, irdl.what());
+        return (-1);
+    } catch (const isc::dns::rdata::CharStringTooLong& cstl) {
+        PyErr_SetString(po_CharStringTooLong, cstl.what());
+        return (-1);
+    } catch (const isc::dns::DNSMessageFORMERR& dmfe) {
+        PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+        return (-1);
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or if
+        // at all, they can be triggered. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (-1);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
+        return (-1);
+    }
+
+    return (-1);
+}
+
+void
+Rdata_destroy(PyObject* self) {
+    // Clear the shared_ptr so that its reference count is zero
+    // before we call tp_free() (there is no direct release())
+    static_cast<s_Rdata*>(self)->cppobj.reset();
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+Rdata_toText_internal(PyObject* self, PyObject*) {
+    // Py_BuildValue makes python objects from native data
+    return (Py_BuildValue("s", static_cast<const s_Rdata*>(self)->cppobj->
+                          toText().c_str()));
+}
+
+PyObject*
+Rdata_toText(PyObject* self, PyObject* args) {
+    return (exception_wrap(&Rdata_toText_internal, self, args));
+}
+
+PyObject*
+Rdata_str(PyObject* self) {
+    // Simply call the to_text method we already defined
+    return (PyObject_CallMethod(self,
+                                const_cast<char*>("to_text"),
+                                const_cast<char*>("")));
+}
+
+PyObject*
+Rdata_toWire_internal(PyObject* self_p, PyObject* args) {
+    PyObject* bytes;
+    PyObject* mr;
+    const s_Rdata* self(static_cast<const s_Rdata*>(self_p));
+
+    if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
+        PyObject* bytes_o = bytes;
+
+        OutputBuffer buffer(4);
+        self->cppobj->toWire(buffer);
+        PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
+        // Make sure exceptions from here are propagated.
+        // The exception is already set, so we just return NULL
+        if (rd_bytes == NULL) {
+            return (NULL);
+        }
+        PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
+        // We need to release the object we temporarily created here
+        // to prevent memory leak
+        Py_DECREF(rd_bytes);
+        return (result);
+    } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
+        // If we return NULL it is seen as an error, so use this for
+        // None returns
+        Py_RETURN_NONE;
+    }
+    PyErr_Clear();
+    PyErr_SetString(PyExc_TypeError,
+                    "toWire argument must be a sequence object or a MessageRenderer");
+    return (NULL);
+}
+
+PyObject*
+Rdata_toWire(PyObject* self, PyObject* args) {
+    return (exception_wrap(&Rdata_toWire_internal, self, args));
+}
+
+PyObject*
+RData_richcmp(PyObject* self_p, PyObject* other_p, int op) {
+    try {
+        bool c;
+        const s_Rdata* self(static_cast<const s_Rdata*>(self_p)),
+              * other(static_cast<const s_Rdata*>(other_p));
+
+        // Check for null and if the types match. If different type,
+        // simply return False
+        if (!other || (self->ob_type != other->ob_type)) {
+            Py_RETURN_FALSE;
+        }
+
+        switch (op) {
+            case Py_LT:
+                c = self->cppobj->compare(*other->cppobj) < 0;
+                break;
+            case Py_LE:
+                c = self->cppobj->compare(*other->cppobj) < 0 ||
+                    self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            case Py_EQ:
+                c = self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            case Py_NE:
+                c = self->cppobj->compare(*other->cppobj) != 0;
+                break;
+            case Py_GT:
+                c = self->cppobj->compare(*other->cppobj) > 0;
+                break;
+            case Py_GE:
+                c = self->cppobj->compare(*other->cppobj) > 0 ||
+                    self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            default:
+                PyErr_SetString(PyExc_IndexError,
+                                "Unhandled rich comparison operator");
+                return (NULL);
+        }
+        if (c) {
+            Py_RETURN_TRUE;
+        } else {
+            Py_RETURN_FALSE;
+        }
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or if
+        // at all, they can be triggered. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
+        return (NULL);
+    }
+}
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp
+//
+PyObject* po_InvalidRdataLength;
+PyObject* po_InvalidRdataText;
+PyObject* po_CharStringTooLong;
+
 // This defines the complete type for reflection in python and
 // parsing of PyObject* to s_Rdata
 // Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rdata_type = {
+PyTypeObject rdata_type = {
     PyVarObject_HEAD_INIT(NULL, 0)
     "pydnspp.Rdata",
     sizeof(s_Rdata),                    // tp_basicsize
     0,                                  // tp_itemsize
-    (destructor)Rdata_destroy,          // tp_dealloc
+    Rdata_destroy,                      // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -103,7 +304,7 @@ static PyTypeObject rdata_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     Rdata_str,                          // tp_str
     NULL,                               // tp_getattro
@@ -114,7 +315,7 @@ static PyTypeObject rdata_type = {
     "a set of common interfaces to manipulate concrete RDATA objects.",
     NULL,                               // tp_traverse
     NULL,                               // tp_clear
-    (richcmpfunc)RData_richcmp,         // tp_richcompare
+    RData_richcmp,                      // tp_richcompare
     0,                                  // tp_weaklistoffset
     NULL,                               // tp_iter
     NULL,                               // tp_iternext
@@ -126,7 +327,7 @@ static PyTypeObject rdata_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    (initproc)Rdata_init,               // tp_init
+    Rdata_init,                         // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
@@ -140,150 +341,36 @@ static PyTypeObject rdata_type = {
     0                                   // tp_version_tag
 };
 
-static int
-Rdata_init(s_Rdata* self, PyObject* args) {
-    s_RRType* rrtype;
-    s_RRClass* rrclass;
-    const char* s;
-    const char* data;
-    Py_ssize_t len;
-
-    // Create from string
-    if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
-                                        &rrclass_type, &rrclass,
-                                        &s)) {
-        self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass, s);
-        return (0);
-    } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
-                                &rrclass_type, &rrclass, &data, &len)) {
-        InputBuffer input_buffer(data, len);
-        self->rdata = createRdata(*rrtype->rrtype, *rrclass->rrclass,
-                                  input_buffer, len);
-        return (0);
+PyObject*
+createRdataObject(ConstRdataPtr source) {
+    s_Rdata* py_rdata =
+        static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
+    if (py_rdata == NULL) {
+        isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+                  "probably due to short memory");
     }
-
-    return (-1);
-}
-
-static void
-Rdata_destroy(s_Rdata* self) {
-    // Clear the shared_ptr so that its reference count is zero
-    // before we call tp_free() (there is no direct release())
-    self->rdata.reset();
-    Py_TYPE(self)->tp_free(self);
-}
-
-static PyObject*
-Rdata_toText(s_Rdata* self) {
-    // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->rdata->toText().c_str()));
-}
-
-static PyObject*
-Rdata_str(PyObject* self) {
-    // Simply call the to_text method we already defined
-    return (PyObject_CallMethod(self,
-                               const_cast<char*>("to_text"),
-                                const_cast<char*>("")));
+    py_rdata->cppobj = source;
+    return (py_rdata);
 }
 
-static PyObject*
-Rdata_toWire(s_Rdata* self, PyObject* args) {
-    PyObject* bytes;
-    s_MessageRenderer* mr;
-    
-    if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
-        PyObject* bytes_o = bytes;
-        
-        OutputBuffer buffer(4);
-        self->rdata->toWire(buffer);
-        PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
-        PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
-        // We need to release the object we temporarily created here
-        // to prevent memory leak
-        Py_DECREF(rd_bytes);
-        return (result);
-    } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->rdata->toWire(*mr->messagerenderer);
-        // If we return NULL it is seen as an error, so use this for
-        // None returns
-        Py_RETURN_NONE;
+bool
+PyRdata_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    PyErr_Clear();
-    PyErr_SetString(PyExc_TypeError,
-                    "toWire argument must be a sequence object or a MessageRenderer");
-    return (NULL);
+    return (PyObject_TypeCheck(obj, &rdata_type));
 }
 
-
-
-static PyObject* 
-RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
-    bool c;
-
-    // Check for null and if the types match. If different type,
-    // simply return False
-    if (!other || (self->ob_type != other->ob_type)) {
-        Py_RETURN_FALSE;
-    }
-
-    switch (op) {
-    case Py_LT:
-        c = self->rdata->compare(*other->rdata) < 0;
-        break;
-    case Py_LE:
-        c = self->rdata->compare(*other->rdata) < 0 ||
-            self->rdata->compare(*other->rdata) == 0;
-        break;
-    case Py_EQ:
-        c = self->rdata->compare(*other->rdata) == 0;
-        break;
-    case Py_NE:
-        c = self->rdata->compare(*other->rdata) != 0;
-        break;
-    case Py_GT:
-        c = self->rdata->compare(*other->rdata) > 0;
-        break;
-    case Py_GE:
-        c = self->rdata->compare(*other->rdata) > 0 ||
-            self->rdata->compare(*other->rdata) == 0;
-        break;
-    default:
-        PyErr_SetString(PyExc_IndexError,
-                        "Unhandled rich comparison operator");
-        return (NULL);
+const Rdata&
+PyRdata_ToRdata(const PyObject* rdata_obj) {
+    if (rdata_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Rdata PyObject conversion");
     }
-    if (c)
-        Py_RETURN_TRUE;
-    else
-        Py_RETURN_FALSE;
+    const s_Rdata* rdata = static_cast<const s_Rdata*>(rdata_obj);
+    return (*rdata->cppobj);
 }
-// end of Rdata
 
-
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_Rdata(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&rdata_type) < 0) {
-        return (false);
-    }
-    Py_INCREF(&rdata_type);
-    PyModule_AddObject(mod, "Rdata",
-                       reinterpret_cast<PyObject*>(&rdata_type));
-
-    // Add the exceptions to the class
-    po_InvalidRdataLength = PyErr_NewException("pydnspp.InvalidRdataLength", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidRdataLength", po_InvalidRdataLength);
-
-    po_InvalidRdataText = PyErr_NewException("pydnspp.InvalidRdataText", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidRdataText", po_InvalidRdataText);
-
-    po_CharStringTooLong = PyErr_NewException("pydnspp.CharStringTooLong", NULL, NULL);
-    PyModule_AddObject(mod, "CharStringTooLong", po_CharStringTooLong);
-
-    
-    return (true);
-}
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rdata_python.h b/src/lib/dns/python/rdata_python.h
new file mode 100644
index 0000000..c7ddd57
--- /dev/null
+++ b/src/lib/dns/python/rdata_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RDATA_H
+#define __PYTHON_RDATA_H 1
+
+#include <Python.h>
+
+#include <dns/rdata.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_InvalidRdataLength;
+extern PyObject* po_InvalidRdataText;
+extern PyObject* po_CharStringTooLong;
+
+extern PyTypeObject rdata_type;
+
+/// This is a simple shortcut to create a python Rdata object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRdataObject(isc::dns::rdata::ConstRdataPtr source);
+
+/// \brief Checks if the given python object is a Rdata object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Rdata, false otherwise
+bool PyRdata_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Rdata object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Rdata; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRdata_Check()
+///
+/// \note This is not a copy; if the Rdata is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rdata_obj The rdata object to convert
+const isc::dns::rdata::Rdata& PyRdata_ToRdata(const PyObject* rdata_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RDATA_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrclass_python.cc b/src/lib/dns/python/rrclass_python.cc
index 6d150c2..0014187 100644
--- a/src/lib/dns/python/rrclass_python.cc
+++ b/src/lib/dns/python/rrclass_python.cc
@@ -11,35 +11,28 @@
 // LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
+#include <Python.h>
 
 #include <dns/rrclass.h>
-using namespace isc::dns;
-using namespace isc::util;
-
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRClass;
-static PyObject* po_IncompleteRRClass;
-
-//
-// Definition of the classes
-//
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
 
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include "rrclass_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
 
-//
-// RRClass
-//
 
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+namespace {
 // The s_* Class simply covers one instantiation of the object
 class s_RRClass : public PyObject {
 public:
-    RRClass* rrclass;
+    s_RRClass() : cppobj(NULL) {};
+    RRClass* cppobj;
 };
 
 //
@@ -48,25 +41,26 @@ public:
 //
 
 // General creation and destruction
-static int RRClass_init(s_RRClass* self, PyObject* args);
-static void RRClass_destroy(s_RRClass* self);
+int RRClass_init(s_RRClass* self, PyObject* args);
+void RRClass_destroy(s_RRClass* self);
 
 // These are the functions we export
-static PyObject* RRClass_toText(s_RRClass* self);
+PyObject* RRClass_toText(s_RRClass* self);
 // This is a second version of toText, we need one where the argument
 // is a PyObject*, for the str() function in python.
-static PyObject* RRClass_str(PyObject* self);
-static PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
-static PyObject* RRClass_getCode(s_RRClass* self);
-static PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
+PyObject* RRClass_str(PyObject* self);
+PyObject* RRClass_toWire(s_RRClass* self, PyObject* args);
+PyObject* RRClass_getCode(s_RRClass* self);
+PyObject* RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op);
 
 // Static function for direct class creation
-static PyObject* RRClass_IN(s_RRClass *self);
-static PyObject* RRClass_CH(s_RRClass *self);
-static PyObject* RRClass_HS(s_RRClass *self);
-static PyObject* RRClass_NONE(s_RRClass *self);
-static PyObject* RRClass_ANY(s_RRClass *self);
+PyObject* RRClass_IN(s_RRClass *self);
+PyObject* RRClass_CH(s_RRClass *self);
+PyObject* RRClass_HS(s_RRClass *self);
+PyObject* RRClass_NONE(s_RRClass *self);
+PyObject* RRClass_ANY(s_RRClass *self);
 
+typedef CPPPyObjectContainer<s_RRClass, RRClass> RRClassContainer;
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -74,7 +68,7 @@ static PyObject* RRClass_ANY(s_RRClass *self);
 // 2. Our static function here
 // 3. Argument type
 // 4. Documentation
-static PyMethodDef RRClass_methods[] = {
+PyMethodDef RRClass_methods[] = {
     { "to_text", reinterpret_cast<PyCFunction>(RRClass_toText), METH_NOARGS,
       "Returns the string representation" },
     { "to_wire", reinterpret_cast<PyCFunction>(RRClass_toWire), METH_VARARGS,
@@ -94,63 +88,7 @@ static PyMethodDef RRClass_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRClass
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrclass_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.RRClass",
-    sizeof(s_RRClass),                  // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)RRClass_destroy,        // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    RRClass_str,                        // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The RRClass class encapsulates DNS resource record classes.\n"
-    "This class manages the 16-bit integer class codes in quite a straightforward"
-    "way.  The only non trivial task is to handle textual representations of"
-    "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    (richcmpfunc)RRClass_richcmp,       // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    RRClass_methods,                    // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)RRClass_init,             // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
-static int
+int
 RRClass_init(s_RRClass* self, PyObject* args) {
     const char* s;
     long i;
@@ -164,7 +102,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
     // (the way to do exceptions is to set PyErr and return -1)
     try {
         if (PyArg_ParseTuple(args, "s", &s)) {
-            self->rrclass = new RRClass(s);
+            self->cppobj = new RRClass(s);
             return (0);
         } else if (PyArg_ParseTuple(args, "l", &i)) {
             if (i < 0 || i > 0xffff) {
@@ -173,7 +111,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
                                 "RR class number out of range");
                 return (-1);
             }
-            self->rrclass = new RRClass(i);
+            self->cppobj = new RRClass(i);
             return (0);
         } else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
             uint8_t data[2];
@@ -182,7 +120,7 @@ RRClass_init(s_RRClass* self, PyObject* args) {
                 return (result);
             }
             InputBuffer ib(data, 2);
-            self->rrclass = new RRClass(ib);
+            self->cppobj = new RRClass(ib);
             PyErr_Clear();
             return (0);
         }
@@ -199,20 +137,20 @@ RRClass_init(s_RRClass* self, PyObject* args) {
     return (-1);
 }
 
-static void
+void
 RRClass_destroy(s_RRClass* self) {
-    delete self->rrclass;
-    self->rrclass = NULL;
+    delete self->cppobj;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
-static PyObject*
+PyObject*
 RRClass_toText(s_RRClass* self) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->rrclass->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
-static PyObject*
+PyObject*
 RRClass_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
@@ -220,16 +158,16 @@ RRClass_str(PyObject* self) {
                                 const_cast<char*>("")));
 }
 
-static PyObject*
+PyObject*
 RRClass_toWire(s_RRClass* self, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
-    
+    PyObject* mr;
+
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
-        
+
         OutputBuffer buffer(2);
-        self->rrclass->toWire(buffer);
+        self->cppobj->toWire(buffer);
         PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
         PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
         // We need to release the object we temporarily created here
@@ -237,7 +175,7 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
         Py_DECREF(n);
         return (result);
     } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->rrclass->toWire(*mr->messagerenderer);
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
         // If we return NULL it is seen as an error, so use this for
         // None returns
         Py_RETURN_NONE;
@@ -248,12 +186,12 @@ RRClass_toWire(s_RRClass* self, PyObject* args) {
     return (NULL);
 }
 
-static PyObject*
+PyObject*
 RRClass_getCode(s_RRClass* self) {
-    return (Py_BuildValue("I", self->rrclass->getCode()));
+    return (Py_BuildValue("I", self->cppobj->getCode()));
 }
 
-static PyObject* 
+PyObject*
 RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
     bool c;
 
@@ -265,24 +203,24 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
 
     switch (op) {
     case Py_LT:
-        c = *self->rrclass < *other->rrclass;
+        c = *self->cppobj < *other->cppobj;
         break;
     case Py_LE:
-        c = *self->rrclass < *other->rrclass ||
-            *self->rrclass == *other->rrclass;
+        c = *self->cppobj < *other->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     case Py_EQ:
-        c = *self->rrclass == *other->rrclass;
+        c = *self->cppobj == *other->cppobj;
         break;
     case Py_NE:
-        c = *self->rrclass != *other->rrclass;
+        c = *self->cppobj != *other->cppobj;
         break;
     case Py_GT:
-        c = *other->rrclass < *self->rrclass;
+        c = *other->cppobj < *self->cppobj;
         break;
     case Py_GE:
-        c = *other->rrclass < *self->rrclass ||
-            *self->rrclass == *other->rrclass;
+        c = *other->cppobj < *self->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     default:
         PyErr_SetString(PyExc_IndexError,
@@ -298,56 +236,131 @@ RRClass_richcmp(s_RRClass* self, s_RRClass* other, int op) {
 //
 // Common function for RRClass_IN/CH/etc.
 //
-static PyObject* RRClass_createStatic(RRClass stc) {
+PyObject* RRClass_createStatic(RRClass stc) {
     s_RRClass* ret = PyObject_New(s_RRClass, &rrclass_type);
     if (ret != NULL) {
-        ret->rrclass = new RRClass(stc);
+        ret->cppobj = new RRClass(stc);
     }
     return (ret);
 }
 
-static PyObject* RRClass_IN(s_RRClass*) {
+PyObject* RRClass_IN(s_RRClass*) {
     return (RRClass_createStatic(RRClass::IN()));
 }
 
-static PyObject* RRClass_CH(s_RRClass*) {
+PyObject* RRClass_CH(s_RRClass*) {
     return (RRClass_createStatic(RRClass::CH()));
 }
 
-static PyObject* RRClass_HS(s_RRClass*) {
+PyObject* RRClass_HS(s_RRClass*) {
     return (RRClass_createStatic(RRClass::HS()));
 }
 
-static PyObject* RRClass_NONE(s_RRClass*) {
+PyObject* RRClass_NONE(s_RRClass*) {
     return (RRClass_createStatic(RRClass::NONE()));
 }
 
-static PyObject* RRClass_ANY(s_RRClass*) {
+PyObject* RRClass_ANY(s_RRClass*) {
     return (RRClass_createStatic(RRClass::ANY()));
 }
-// end of RRClass
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRClass;
+PyObject* po_IncompleteRRClass;
+
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRClass
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrclass_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.RRClass",
+    sizeof(s_RRClass),                  // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)RRClass_destroy,        // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    RRClass_str,                        // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The RRClass class encapsulates DNS resource record classes.\n"
+    "This class manages the 16-bit integer class codes in quite a straightforward"
+    "way.  The only non trivial task is to handle textual representations of"
+    "RR classes, such as \"IN\", \"CH\", or \"CLASS65534\".",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)RRClass_richcmp,       // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    RRClass_methods,                    // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)RRClass_init,             // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createRRClassObject(const RRClass& source) {
+    RRClassContainer container(PyObject_New(s_RRClass, &rrclass_type));
+    container.set(new RRClass(source));
+    return (container.release());
+}
 
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_RRClass(PyObject* mod) {
-    // Add the exceptions to the module
-    po_InvalidRRClass = PyErr_NewException("pydnspp.InvalidRRClass", NULL, NULL);
-    Py_INCREF(po_InvalidRRClass);
-    PyModule_AddObject(mod, "InvalidRRClass", po_InvalidRRClass);
-    po_IncompleteRRClass = PyErr_NewException("pydnspp.IncompleteRRClass", NULL, NULL);
-    Py_INCREF(po_IncompleteRRClass);
-    PyModule_AddObject(mod, "IncompleteRRClass", po_IncompleteRRClass);
-
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&rrclass_type) < 0) {
-        return (false);
+PyRRClass_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    Py_INCREF(&rrclass_type);
-    PyModule_AddObject(mod, "RRClass",
-                       reinterpret_cast<PyObject*>(&rrclass_type));
-    
-    return (true);
+    return (PyObject_TypeCheck(obj, &rrclass_type));
 }
+
+const RRClass&
+PyRRClass_ToRRClass(const PyObject* rrclass_obj) {
+    if (rrclass_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in RRClass PyObject conversion");
+    }
+    const s_RRClass* rrclass = static_cast<const s_RRClass*>(rrclass_obj);
+    return (*rrclass->cppobj);
+}
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrclass_python.h b/src/lib/dns/python/rrclass_python.h
new file mode 100644
index 0000000..f58bba6
--- /dev/null
+++ b/src/lib/dns/python/rrclass_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRCLASS_H
+#define __PYTHON_RRCLASS_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRClass;
+
+namespace python {
+
+extern PyObject* po_InvalidRRClass;
+extern PyObject* po_IncompleteRRClass;
+
+extern PyTypeObject rrclass_type;
+
+/// This is a simple shortcut to create a python RRClass object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRClassObject(const RRClass& source);
+
+/// \brief Checks if the given python object is a RRClass object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRClass, false otherwise
+bool PyRRClass_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRClass object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type RRClass; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRRClass_Check()
+///
+/// \note This is not a copy; if the RRClass is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrclass_obj The rrclass object to convert
+const RRClass& PyRRClass_ToRRClass(const PyObject* rrclass_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRCLASS_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index 71a0710..77d520b 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -12,302 +12,251 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <dns/rrset.h>
+#include <Python.h>
 
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the module init at the
-// end
-//
-static PyObject* po_EmptyRRset;
+#include <util/python/pycppwrapper_util.h>
 
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
+#include <dns/rrset.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+
+#include "name_python.h"
+#include "pydnspp_common.h"
+#include "rrset_python.h"
+#include "rrclass_python.h"
+#include "rrtype_python.h"
+#include "rrttl_python.h"
+#include "rdata_python.h"
+#include "messagerenderer_python.h"
+
+using namespace std;
 using namespace isc::dns;
+using namespace isc::dns::python;
 using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
 
-// RRset
+// The s_* Class simply covers one instantiation of the object
 
 // Using a shared_ptr here should not really be necessary (PyObject
 // is already reference-counted), however internally on the cpp side,
 // not doing so might result in problems, since we can't copy construct
-// rrsets, adding them to messages results in a problem when the
-// message is destroyed or cleared later
+// rdata fields; adding them to rrsets results in a problem when the
+// rrset is destroyed later
 class s_RRset : public PyObject {
 public:
-    RRsetPtr rrset;
+    isc::dns::RRsetPtr cppobj;
 };
 
-static int RRset_init(s_RRset* self, PyObject* args);
-static void RRset_destroy(s_RRset* self);
-
-static PyObject* RRset_getRdataCount(s_RRset* self);
-static PyObject* RRset_getName(s_RRset* self);
-static PyObject* RRset_getClass(s_RRset* self);
-static PyObject* RRset_getType(s_RRset* self);
-static PyObject* RRset_getTTL(s_RRset* self);
-static PyObject* RRset_setName(s_RRset* self, PyObject* args);
-static PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-static PyObject* RRset_toText(s_RRset* self);
-static PyObject* RRset_str(PyObject* self);
-static PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-static PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-static PyObject* RRset_getRdata(s_RRset* self);
+int RRset_init(s_RRset* self, PyObject* args);
+void RRset_destroy(s_RRset* self);
+
+PyObject* RRset_getRdataCount(PyObject* self, PyObject* args);
+PyObject* RRset_getName(PyObject* self, PyObject* args);
+PyObject* RRset_getClass(PyObject* self, PyObject* args);
+PyObject* RRset_getType(PyObject* self, PyObject* args);
+PyObject* RRset_getTTL(PyObject* self, PyObject* args);
+PyObject* RRset_setName(PyObject* self, PyObject* args);
+PyObject* RRset_setTTL(PyObject* self, PyObject* args);
+PyObject* RRset_toText(PyObject* self, PyObject* args);
+PyObject* RRset_str(PyObject* self);
+PyObject* RRset_toWire(PyObject* self, PyObject* args);
+PyObject* RRset_addRdata(PyObject* self, PyObject* args);
+PyObject* RRset_getRdata(PyObject* po_self, PyObject* args);
+PyObject* RRset_removeRRsig(PyObject* self, PyObject* args);
+
 // TODO: iterator?
 
-static PyMethodDef RRset_methods[] = {
-    { "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
+PyMethodDef RRset_methods[] = {
+    { "get_rdata_count", RRset_getRdataCount, METH_NOARGS,
       "Returns the number of rdata fields." },
-    { "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
+    { "get_name", RRset_getName, METH_NOARGS,
       "Returns the name of the RRset, as a Name object." },
-    { "get_class", reinterpret_cast<PyCFunction>(RRset_getClass), METH_NOARGS,
+    { "get_class", RRset_getClass, METH_NOARGS,
       "Returns the class of the RRset as an RRClass object." },
-    { "get_type", reinterpret_cast<PyCFunction>(RRset_getType), METH_NOARGS,
+    { "get_type", RRset_getType, METH_NOARGS,
       "Returns the type of the RRset as an RRType object." },
-    { "get_ttl", reinterpret_cast<PyCFunction>(RRset_getTTL), METH_NOARGS,
+    { "get_ttl", RRset_getTTL, METH_NOARGS,
       "Returns the TTL of the RRset as an RRTTL object." },
-    { "set_name", reinterpret_cast<PyCFunction>(RRset_setName), METH_VARARGS,
+    { "set_name", RRset_setName, METH_VARARGS,
       "Sets the name of the RRset.\nTakes a Name object as an argument." },
-    { "set_ttl", reinterpret_cast<PyCFunction>(RRset_setTTL), METH_VARARGS,
+    { "set_ttl", RRset_setTTL, METH_VARARGS,
       "Sets the TTL of the RRset.\nTakes an RRTTL object as an argument." },
-    { "to_text", reinterpret_cast<PyCFunction>(RRset_toText), METH_NOARGS,
+    { "to_text", RRset_toText, METH_NOARGS,
       "Returns the text representation of the RRset as a string" },
-    { "to_wire", reinterpret_cast<PyCFunction>(RRset_toWire), METH_VARARGS,
+    { "to_wire", RRset_toWire, METH_VARARGS,
       "Converts the RRset object to wire format.\n"
       "The argument can be either a MessageRenderer or an object that "
       "implements the sequence interface. If the object is mutable "
       "(for instance a bytearray()), the wire data is added in-place.\n"
       "If it is not (for instance a bytes() object), a new object is "
       "returned" },
-    { "add_rdata", reinterpret_cast<PyCFunction>(RRset_addRdata), METH_VARARGS,
+    { "add_rdata", RRset_addRdata, METH_VARARGS,
       "Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
-    { "get_rdata", reinterpret_cast<PyCFunction>(RRset_getRdata), METH_NOARGS,
+    { "get_rdata", RRset_getRdata, METH_NOARGS,
       "Returns a List containing all Rdata elements" },
+    { "remove_rrsig", RRset_removeRRsig, METH_NOARGS,
+      "Clears the list of RRsigs for this RRset" },
     { NULL, NULL, 0, NULL }
 };
 
-static PyTypeObject rrset_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.RRset",
-    sizeof(s_RRset),                    // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)RRset_destroy,          // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    RRset_str,                          // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The AbstractRRset class is an abstract base class that "
-    "models a DNS RRset.\n\n"
-    "An object of (a specific derived class of) AbstractRRset "
-    "models an RRset as described in the DNS standard:\n"
-    "A set of DNS resource records (RRs) of the same type and class. "
-    "The standard requires the TTL of all RRs in an RRset be the same; "
-    "this class follows that requirement.\n\n"
-    "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
-    "RRset contains two identical RRs and that name servers should suppress "
-    "such duplicates.\n"
-    "This class is not responsible for ensuring this requirement: For example, "
-    "addRdata() method doesn't check if there's already RDATA identical "
-    "to the one being added.\n"
-    "This is because such checks can be expensive, and it's often easy to "
-    "ensure the uniqueness requirement at the %data preparation phase "
-    "(e.g. when loading a zone).",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    NULL,                               // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    RRset_methods,                      // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)RRset_init,               // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
-static int
+int
 RRset_init(s_RRset* self, PyObject* args) {
-    s_Name* name;
-    s_RRClass* rrclass;
-    s_RRType* rrtype;
-    s_RRTTL* rrttl;
+    PyObject* name;
+    PyObject* rrclass;
+    PyObject* rrtype;
+    PyObject* rrttl;
 
     if (PyArg_ParseTuple(args, "O!O!O!O!", &name_type, &name,
                                            &rrclass_type, &rrclass,
                                            &rrtype_type, &rrtype,
                                            &rrttl_type, &rrttl
        )) {
-        self->rrset = RRsetPtr(new RRset(*name->cppobj, *rrclass->rrclass,
-                                *rrtype->rrtype, *rrttl->rrttl));
+        self->cppobj = RRsetPtr(new RRset(PyName_ToName(name),
+                                          PyRRClass_ToRRClass(rrclass),
+                                          PyRRType_ToRRType(rrtype),
+                                          PyRRTTL_ToRRTTL(rrttl)));
         return (0);
     }
 
-    self->rrset = RRsetPtr();
+    self->cppobj = RRsetPtr();
     return (-1);
 }
 
-static void
+void
 RRset_destroy(s_RRset* self) {
     // Clear the shared_ptr so that its reference count is zero
     // before we call tp_free() (there is no direct release())
-    self->rrset.reset();
+    self->cppobj.reset();
     Py_TYPE(self)->tp_free(self);
 }
 
-static PyObject*
-RRset_getRdataCount(s_RRset* self) {
-    return (Py_BuildValue("I", self->rrset->getRdataCount()));
+PyObject*
+RRset_getRdataCount(PyObject* self, PyObject*) {
+    return (Py_BuildValue("I", static_cast<const s_RRset*>(self)->cppobj->
+                          getRdataCount()));
 }
 
-static PyObject*
-RRset_getName(s_RRset* self) {
-    s_Name* name;
-
-    // is this the best way to do this?
-    name = static_cast<s_Name*>(name_type.tp_alloc(&name_type, 0));
-    if (name != NULL) {
-        name->cppobj = new Name(self->rrset->getName());
-        if (name->cppobj == NULL)
-          {
-            Py_DECREF(name);
-            return (NULL);
-          }
+PyObject*
+RRset_getName(PyObject* self, PyObject*) {
+    try {
+        return (createNameObject(static_cast<const s_RRset*>(self)->cppobj->
+                                 getName()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting rrset Name: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting rrset Name");
     }
-
-    return (name);
+    return (NULL);
 }
 
-static PyObject*
-RRset_getClass(s_RRset* self) {
-    s_RRClass* rrclass;
-
-    rrclass = static_cast<s_RRClass*>(rrclass_type.tp_alloc(&rrclass_type, 0));
-    if (rrclass != NULL) {
-        rrclass->rrclass = new RRClass(self->rrset->getClass());
-        if (rrclass->rrclass == NULL)
-          {
-            Py_DECREF(rrclass);
-            return (NULL);
-          }
+PyObject*
+RRset_getClass(PyObject* self, PyObject*) {
+    try {
+        return (createRRClassObject(static_cast<const s_RRset*>(self)->cppobj->
+                                    getClass()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question RRClass: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question RRClass");
     }
-
-    return (rrclass);
+    return (NULL);
 }
 
-static PyObject*
-RRset_getType(s_RRset* self) {
-    s_RRType* rrtype;
-
-    rrtype = static_cast<s_RRType*>(rrtype_type.tp_alloc(&rrtype_type, 0));
-    if (rrtype != NULL) {
-        rrtype->rrtype = new RRType(self->rrset->getType());
-        if (rrtype->rrtype == NULL)
-          {
-            Py_DECREF(rrtype);
-            return (NULL);
-          }
+PyObject*
+RRset_getType(PyObject* self, PyObject*) {
+    try {
+        return (createRRTypeObject(static_cast<const s_RRset*>(self)->cppobj->
+                                   getType()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question RRType: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question RRType");
     }
-
-    return (rrtype);
+    return (NULL);
 }
 
-static PyObject*
-RRset_getTTL(s_RRset* self) {
-    s_RRTTL* rrttl;
-
-    rrttl = static_cast<s_RRTTL*>(rrttl_type.tp_alloc(&rrttl_type, 0));
-    if (rrttl != NULL) {
-        rrttl->rrttl = new RRTTL(self->rrset->getTTL());
-        if (rrttl->rrttl == NULL)
-          {
-            Py_DECREF(rrttl);
-            return (NULL);
-          }
+PyObject*
+RRset_getTTL(PyObject* self, PyObject*) {
+    try {
+        return (createRRTTLObject(static_cast<const s_RRset*>(self)->cppobj->
+                                  getTTL()));
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting question TTL: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting question TTL");
     }
-
-    return (rrttl);
+    return (NULL);
 }
 
-static PyObject*
-RRset_setName(s_RRset* self, PyObject* args) {
-    s_Name* name;
+PyObject*
+RRset_setName(PyObject* self, PyObject* args) {
+    PyObject* name;
     if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
         return (NULL);
     }
-    self->rrset->setName(*name->cppobj);
+    static_cast<s_RRset*>(self)->cppobj->setName(PyName_ToName(name));
     Py_RETURN_NONE;
 }
 
-static PyObject*
-RRset_setTTL(s_RRset* self, PyObject* args) {
-    s_RRTTL* rrttl;
+PyObject*
+RRset_setTTL(PyObject* self, PyObject* args) {
+    PyObject* rrttl;
     if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
         return (NULL);
     }
-    self->rrset->setTTL(*rrttl->rrttl);
+    static_cast<s_RRset*>(self)->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
     Py_RETURN_NONE;
 }
 
-static PyObject*
-RRset_toText(s_RRset* self) {
+PyObject*
+RRset_toText(PyObject* self, PyObject*) {
     try {
-        return (Py_BuildValue("s", self->rrset->toText().c_str()));
+        return (Py_BuildValue("s", static_cast<const s_RRset*>(self)->cppobj->
+                              toText().c_str()));
     } catch (const EmptyRRset& ers) {
         PyErr_SetString(po_EmptyRRset, ers.what());
         return (NULL);
     }
 }
 
-static PyObject*
+PyObject*
 RRset_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
-                               const_cast<char*>("to_text"),
+                                const_cast<char*>("to_text"),
                                 const_cast<char*>("")));
 }
 
-static PyObject*
-RRset_toWire(s_RRset* self, PyObject* args) {
+PyObject*
+RRset_toWire(PyObject* self_p, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
+    PyObject* mr;
+    const s_RRset* self(static_cast<const s_RRset*>(self_p));
 
     try {
         if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
             PyObject* bytes_o = bytes;
-            
+
             OutputBuffer buffer(4096);
-            self->rrset->toWire(buffer);
+            self->cppobj->toWire(buffer);
             PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
             PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
             // We need to release the object we temporarily created here
@@ -315,7 +264,7 @@ RRset_toWire(s_RRset* self, PyObject* args) {
             Py_DECREF(n);
             return (result);
         } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-            self->rrset->toWire(*mr->messagerenderer);
+            self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
             // If we return NULL it is seen as an error, so use this for
             // None returns
             Py_RETURN_NONE;
@@ -331,14 +280,14 @@ RRset_toWire(s_RRset* self, PyObject* args) {
     return (NULL);
 }
 
-static PyObject*
-RRset_addRdata(s_RRset* self, PyObject* args) {
-    s_Rdata* rdata;
+PyObject*
+RRset_addRdata(PyObject* self, PyObject* args) {
+    PyObject* rdata;
     if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
         return (NULL);
     }
     try {
-        self->rrset->addRdata(*rdata->rdata);
+        static_cast<s_RRset*>(self)->cppobj->addRdata(PyRdata_ToRdata(rdata));
         Py_RETURN_NONE;
     } catch (const std::bad_cast&) {
         PyErr_Clear();
@@ -348,55 +297,176 @@ RRset_addRdata(s_RRset* self, PyObject* args) {
     }
 }
 
-static PyObject*
-RRset_getRdata(s_RRset* self) {
-    PyObject* list = PyList_New(0);
-
-    RdataIteratorPtr it = self->rrset->getRdataIterator();
-
-    for (; !it->isLast(); it->next()) {
-        s_Rdata *rds = static_cast<s_Rdata*>(rdata_type.tp_alloc(&rdata_type, 0));
-        if (rds != NULL) {
-            // hmz them iterators/shared_ptrs and private constructors
-            // make this a bit weird, so we create a new one with
-            // the data available
-            const Rdata *rd = &it->getCurrent();
-            rds->rdata = createRdata(self->rrset->getType(), self->rrset->getClass(), *rd);
-            PyList_Append(list, rds);
-        } else {
-            return (NULL);
+PyObject*
+RRset_getRdata(PyObject* po_self, PyObject*) {
+    const s_RRset* const self = static_cast<s_RRset*>(po_self);
+
+    try {
+        PyObjectContainer list_container(PyList_New(0));
+
+        for (RdataIteratorPtr it = self->cppobj->getRdataIterator();
+             !it->isLast(); it->next()) {
+            if (PyList_Append(list_container.get(),
+                              PyObjectContainer(
+                                  createRdataObject(
+                                      createRdata(self->cppobj->getType(),
+                                                  self->cppobj->getClass(),
+                                                  it->getCurrent()))).get())
+                == -1) {
+                isc_throw(PyCPPWrapperException, "PyList_Append failed, "
+                          "probably due to short memory");
+            }
         }
+        return (list_container.release());
+    } catch (const exception& ex) {
+        const string ex_what =
+            "Unexpected failure getting rrset Rdata: " +
+            string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure getting rrset Rdata");
     }
-    
-    return (list);
+    return (NULL);
 }
 
-// end of RRset
+PyObject*
+RRset_removeRRsig(PyObject* self, PyObject*) {
+    static_cast<s_RRset*>(self)->cppobj->removeRRsig();
+    Py_RETURN_NONE;
+}
 
+} // end of unnamed namespace
 
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_RRset(PyObject* mod) {
-    // Add the exceptions to the module
-    po_EmptyRRset = PyErr_NewException("pydnspp.EmptyRRset", NULL, NULL);
-    PyModule_AddObject(mod, "EmptyRRset", po_EmptyRRset);
+namespace isc {
+namespace dns {
+namespace python {
 
-    // Add the enums to the module
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the module init at the
+// end
+//
+PyObject* po_EmptyRRset;
 
-    // Add the constants to the module
+PyTypeObject rrset_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.RRset",
+    sizeof(s_RRset),                    // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)RRset_destroy,          // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    RRset_str,                          // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The AbstractRRset class is an abstract base class that "
+    "models a DNS RRset.\n\n"
+    "An object of (a specific derived class of) AbstractRRset "
+    "models an RRset as described in the DNS standard:\n"
+    "A set of DNS resource records (RRs) of the same type and class. "
+    "The standard requires the TTL of all RRs in an RRset be the same; "
+    "this class follows that requirement.\n\n"
+    "Note about duplicate RDATA: RFC2181 states that it's meaningless that an "
+    "RRset contains two identical RRs and that name servers should suppress "
+    "such duplicates.\n"
+    "This class is not responsible for ensuring this requirement: For example, "
+    "addRdata() method doesn't check if there's already RDATA identical "
+    "to the one being added.\n"
+    "This is because such checks can be expensive, and it's often easy to "
+    "ensure the uniqueness requirement at the %data preparation phase "
+    "(e.g. when loading a zone).",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    RRset_methods,                      // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)RRset_init,               // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createRRsetObject(const RRset& source) {
 
-    // Add the classes to the module
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module
+    // RRsets are noncopyable, so as a workaround we recreate a new one
+    // and copy over all content
+    RRsetPtr new_rrset = isc::dns::RRsetPtr(
+        new isc::dns::RRset(source.getName(), source.getClass(),
+                            source.getType(), source.getTTL()));
 
-    // NameComparisonResult
-    if (PyType_Ready(&rrset_type) < 0) {
-        return (false);
+    isc::dns::RdataIteratorPtr rdata_it(source.getRdataIterator());
+    for (rdata_it->first(); !rdata_it->isLast(); rdata_it->next()) {
+        new_rrset->addRdata(rdata_it->getCurrent());
+    }
+
+    isc::dns::RRsetPtr sigs = source.getRRsig();
+    if (sigs) {
+        new_rrset->addRRsig(sigs);
+    }
+    s_RRset* py_rrset =
+        static_cast<s_RRset*>(rrset_type.tp_alloc(&rrset_type, 0));
+    if (py_rrset == NULL) {
+        isc_throw(PyCPPWrapperException, "Unexpected NULL C++ object, "
+                  "probably due to short memory");
     }
-    Py_INCREF(&rrset_type);
-    PyModule_AddObject(mod, "RRset",
-                       reinterpret_cast<PyObject*>(&rrset_type));
-    
-    return (true);
+    py_rrset->cppobj = new_rrset;
+    return (py_rrset);
 }
 
+bool
+PyRRset_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &rrset_type));
+}
+
+RRset&
+PyRRset_ToRRset(PyObject* rrset_obj) {
+    s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+    return (*rrset->cppobj);
+}
+
+RRsetPtr
+PyRRset_ToRRsetPtr(PyObject* rrset_obj) {
+    if (rrset_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in RRset PyObject conversion");
+    }
+    s_RRset* rrset = static_cast<s_RRset*>(rrset_obj);
+    return (rrset->cppobj);
+}
+
+
+} // end python namespace
+} // end dns namespace
+} // end isc namespace
diff --git a/src/lib/dns/python/rrset_python.h b/src/lib/dns/python/rrset_python.h
new file mode 100644
index 0000000..4268678
--- /dev/null
+++ b/src/lib/dns/python/rrset_python.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRSET_H
+#define __PYTHON_RRSET_H 1
+
+#include <Python.h>
+
+#include <dns/rrset.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+namespace isc {
+namespace dns {
+namespace python {
+
+extern PyObject* po_EmptyRRset;
+
+extern PyTypeObject rrset_type;
+
+/// This is a simple shortcut to create a python RRset object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRsetObject(const RRset& source);
+
+/// \brief Checks if the given python object is a RRset object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRset, false otherwise
+bool PyRRset_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRset object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \note This is not a copy; if the RRset is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrset_obj The rrset object to convert
+RRset& PyRRset_ToRRset(PyObject* rrset_obj);
+
+/// \brief Returns the shared_ptr of the RRset object contained within the
+///        given Python object.
+///
+/// \note The given object MUST be of type RRset; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRRset_Check()
+///
+/// \param rrset_obj The rrset object to convert
+RRsetPtr PyRRset_ToRRsetPtr(PyObject* rrset_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRSET_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrttl_python.cc b/src/lib/dns/python/rrttl_python.cc
index c4b25bf..3a3f067 100644
--- a/src/lib/dns/python/rrttl_python.cc
+++ b/src/lib/dns/python/rrttl_python.cc
@@ -12,57 +12,41 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <Python.h>
 #include <vector>
 
 #include <dns/rrttl.h>
+#include <dns/messagerenderer.h>
+#include <util/buffer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrttl_python.h"
+#include "pydnspp_common.h"
+#include "messagerenderer_python.h"
 
 using namespace std;
 using namespace isc::dns;
+using namespace isc::dns::python;
 using namespace isc::util;
+using namespace isc::util::python;
 
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRTTL;
-static PyObject* po_IncompleteRRTTL;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRTTL
-//
-
+namespace {
 // The s_* Class simply covers one instantiation of the object
 class s_RRTTL : public PyObject {
 public:
-    RRTTL* rrttl;
+    s_RRTTL() : cppobj(NULL) {};
+    isc::dns::RRTTL* cppobj;
 };
 
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
-// General creation and destruction
-static int RRTTL_init(s_RRTTL* self, PyObject* args);
-static void RRTTL_destroy(s_RRTTL* self);
+typedef CPPPyObjectContainer<s_RRTTL, RRTTL> RRTTLContainer;
 
-// These are the functions we export
-static PyObject* RRTTL_toText(s_RRTTL* self);
+PyObject* RRTTL_toText(s_RRTTL* self);
 // This is a second version of toText, we need one where the argument
 // is a PyObject*, for the str() function in python.
-static PyObject* RRTTL_str(PyObject* self);
-static PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
-static PyObject* RRTTL_getValue(s_RRTTL* self);
-static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
+PyObject* RRTTL_str(PyObject* self);
+PyObject* RRTTL_toWire(s_RRTTL* self, PyObject* args);
+PyObject* RRTTL_getValue(s_RRTTL* self);
+PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -70,7 +54,7 @@ static PyObject* RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op);
 // 2. Our static function here
 // 3. Argument type
 // 4. Documentation
-static PyMethodDef RRTTL_methods[] = {
+PyMethodDef RRTTL_methods[] = {
     { "to_text", reinterpret_cast<PyCFunction>(RRTTL_toText), METH_NOARGS,
       "Returns the string representation" },
     { "to_wire", reinterpret_cast<PyCFunction>(RRTTL_toWire), METH_VARARGS,
@@ -85,65 +69,7 @@ static PyMethodDef RRTTL_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRTTL
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrttl_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.RRTTL",
-    sizeof(s_RRTTL),                    // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)RRTTL_destroy,          // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    RRTTL_str,                          // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
-    "This is a straightforward class; an RRTTL object simply maintains a "
-    "32-bit unsigned integer corresponding to the TTL value.  The main purpose "
-    "of this class is to provide convenient interfaces to convert a textual "
-    "representation into the integer TTL value and vice versa, and to handle "
-    "wire-format representations.",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    (richcmpfunc)RRTTL_richcmp,         // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    RRTTL_methods,                      // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)RRTTL_init,               // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
-static int
+int
 RRTTL_init(s_RRTTL* self, PyObject* args) {
     const char* s;
     long long i;
@@ -157,7 +83,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
     // (the way to do exceptions is to set PyErr and return -1)
     try {
         if (PyArg_ParseTuple(args, "s", &s)) {
-            self->rrttl = new RRTTL(s);
+            self->cppobj = new RRTTL(s);
             return (0);
         } else if (PyArg_ParseTuple(args, "L", &i)) {
             PyErr_Clear();
@@ -165,7 +91,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
                 PyErr_SetString(PyExc_ValueError, "RR TTL number out of range");
                 return (-1);
             }
-            self->rrttl = new RRTTL(i);
+            self->cppobj = new RRTTL(i);
             return (0);
         } else if (PyArg_ParseTuple(args, "O", &bytes) &&
                    PySequence_Check(bytes)) {
@@ -176,7 +102,7 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
                 return (result);
             }
             InputBuffer ib(&data[0], size);
-            self->rrttl = new RRTTL(ib);
+            self->cppobj = new RRTTL(ib);
             PyErr_Clear();
             return (0);
         }
@@ -200,20 +126,20 @@ RRTTL_init(s_RRTTL* self, PyObject* args) {
     return (-1);
 }
 
-static void
+void
 RRTTL_destroy(s_RRTTL* self) {
-    delete self->rrttl;
-    self->rrttl = NULL;
+    delete self->cppobj;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
-static PyObject*
+PyObject*
 RRTTL_toText(s_RRTTL* self) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->rrttl->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
-static PyObject*
+PyObject*
 RRTTL_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
@@ -221,16 +147,16 @@ RRTTL_str(PyObject* self) {
                                 const_cast<char*>("")));
 }
 
-static PyObject*
+PyObject*
 RRTTL_toWire(s_RRTTL* self, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
-    
+    PyObject* mr;
+
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
-        
+
         OutputBuffer buffer(4);
-        self->rrttl->toWire(buffer);
+        self->cppobj->toWire(buffer);
         PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()),
                                                 buffer.getLength());
         PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
@@ -239,7 +165,7 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
         Py_DECREF(n);
         return (result);
     } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->rrttl->toWire(*mr->messagerenderer);
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
         // If we return NULL it is seen as an error, so use this for
         // None returns
         Py_RETURN_NONE;
@@ -250,12 +176,12 @@ RRTTL_toWire(s_RRTTL* self, PyObject* args) {
     return (NULL);
 }
 
-static PyObject*
+PyObject*
 RRTTL_getValue(s_RRTTL* self) {
-    return (Py_BuildValue("I", self->rrttl->getValue()));
+    return (Py_BuildValue("I", self->cppobj->getValue()));
 }
 
-static PyObject* 
+PyObject*
 RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
     bool c = false;
 
@@ -267,24 +193,24 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
 
     switch (op) {
     case Py_LT:
-        c = *self->rrttl < *other->rrttl;
+        c = *self->cppobj < *other->cppobj;
         break;
     case Py_LE:
-        c = *self->rrttl < *other->rrttl ||
-            *self->rrttl == *other->rrttl;
+        c = *self->cppobj < *other->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     case Py_EQ:
-        c = *self->rrttl == *other->rrttl;
+        c = *self->cppobj == *other->cppobj;
         break;
     case Py_NE:
-        c = *self->rrttl != *other->rrttl;
+        c = *self->cppobj != *other->cppobj;
         break;
     case Py_GT:
-        c = *other->rrttl < *self->rrttl;
+        c = *other->cppobj < *self->cppobj;
         break;
     case Py_GE:
-        c = *other->rrttl < *self->rrttl ||
-            *self->rrttl == *other->rrttl;
+        c = *other->cppobj < *self->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     }
     if (c)
@@ -292,27 +218,104 @@ RRTTL_richcmp(s_RRTTL* self, s_RRTTL* other, int op) {
     else
         Py_RETURN_FALSE;
 }
-// end of RRTTL
 
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+
+//
+// Declaration of the custom exceptions
+// Initialization and addition of these go in the initModulePart
+// function in pydnspp.cc
+//
+PyObject* po_InvalidRRTTL;
+PyObject* po_IncompleteRRTTL;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRTTL
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrttl_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.RRTTL",
+    sizeof(s_RRTTL),                    // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)RRTTL_destroy,          // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    RRTTL_str,                          // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The RRTTL class encapsulates TTLs used in DNS resource records.\n\n"
+    "This is a straightforward class; an RRTTL object simply maintains a "
+    "32-bit unsigned integer corresponding to the TTL value.  The main purpose "
+    "of this class is to provide convenient interfaces to convert a textual "
+    "representation into the integer TTL value and vice versa, and to handle "
+    "wire-format representations.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)RRTTL_richcmp,         // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    RRTTL_methods,                      // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)RRTTL_init,               // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createRRTTLObject(const RRTTL& source) {
+    RRTTLContainer container(PyObject_New(s_RRTTL, &rrttl_type));
+    container.set(new RRTTL(source));
+    return (container.release());
+}
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_RRTTL(PyObject* mod) {
-    // Add the exceptions to the module
-    po_InvalidRRTTL = PyErr_NewException("pydnspp.InvalidRRTTL", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidRRTTL", po_InvalidRRTTL);
-    po_IncompleteRRTTL = PyErr_NewException("pydnspp.IncompleteRRTTL", NULL, NULL);
-    PyModule_AddObject(mod, "IncompleteRRTTL", po_IncompleteRRTTL);
+PyRRTTL_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &rrttl_type));
+}
 
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&rrttl_type) < 0) {
-        return (false);
+const RRTTL&
+PyRRTTL_ToRRTTL(const PyObject* rrttl_obj) {
+    if (rrttl_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in RRTTL PyObject conversion");
     }
-    Py_INCREF(&rrttl_type);
-    PyModule_AddObject(mod, "RRTTL",
-                       reinterpret_cast<PyObject*>(&rrttl_type));
-    
-    return (true);
+    const s_RRTTL* rrttl = static_cast<const s_RRTTL*>(rrttl_obj);
+    return (*rrttl->cppobj);
 }
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/rrttl_python.h b/src/lib/dns/python/rrttl_python.h
new file mode 100644
index 0000000..9dbc982
--- /dev/null
+++ b/src/lib/dns/python/rrttl_python.h
@@ -0,0 +1,67 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTTL_H
+#define __PYTHON_RRTTL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRTTL;
+
+namespace python {
+
+extern PyObject* po_InvalidRRTTL;
+extern PyObject* po_IncompleteRRTTL;
+
+extern PyTypeObject rrttl_type;
+
+/// This is a simple shortcut to create a python RRTTL object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTTLObject(const RRTTL& source);
+
+/// \brief Checks if the given python object is a RRTTL object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRTTL, false otherwise
+bool PyRRTTL_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRTTL object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type RRTTL; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRRTTL_Check()
+///
+/// \note This is not a copy; if the RRTTL is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrttl_obj The rrttl object to convert
+const RRTTL& PyRRTTL_ToRRTTL(const PyObject* rrttl_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTTL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/rrtype_python.cc b/src/lib/dns/python/rrtype_python.cc
index 00e0acd..bf20b7c 100644
--- a/src/lib/dns/python/rrtype_python.cc
+++ b/src/lib/dns/python/rrtype_python.cc
@@ -12,77 +12,64 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <Python.h>
 #include <vector>
 
 #include <dns/rrtype.h>
+#include <dns/messagerenderer.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "rrtype_python.h"
+#include "messagerenderer_python.h"
+#include "pydnspp_common.h"
 
 using namespace std;
 using namespace isc::dns;
+using namespace isc::dns::python;
 using namespace isc::util;
+using namespace isc::util::python;
 
-//
-// Declaration of the custom exceptions
-// Initialization and addition of these go in the initModulePart
-// function at the end of this file
-//
-static PyObject* po_InvalidRRType;
-static PyObject* po_IncompleteRRType;
-
-//
-// Definition of the classes
-//
-
-// For each class, we need a struct, a helper functions (init, destroy,
-// and static wrappers around the methods we export), a list of methods,
-// and a type description
-
-//
-// RRType
-//
-
+namespace {
 // The s_* Class simply covers one instantiation of the object
 class s_RRType : public PyObject {
 public:
-    const RRType* rrtype;
+    const RRType* cppobj;
 };
 
-//
-// We declare the functions here, the definitions are below
-// the type definition of the object, since both can use the other
-//
-
 // General creation and destruction
-static int RRType_init(s_RRType* self, PyObject* args);
-static void RRType_destroy(s_RRType* self);
+int RRType_init(s_RRType* self, PyObject* args);
+void RRType_destroy(s_RRType* self);
 
 // These are the functions we export
-static PyObject*
+PyObject*
 RRType_toText(s_RRType* self);
 // This is a second version of toText, we need one where the argument
 // is a PyObject*, for the str() function in python.
-static PyObject* RRType_str(PyObject* self);
-static PyObject* RRType_toWire(s_RRType* self, PyObject* args);
-static PyObject* RRType_getCode(s_RRType* self);
-static PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
-static PyObject* RRType_NSEC3PARAM(s_RRType *self);
-static PyObject* RRType_DNAME(s_RRType *self);
-static PyObject* RRType_PTR(s_RRType *self);
-static PyObject* RRType_MX(s_RRType *self);
-static PyObject* RRType_DNSKEY(s_RRType *self);
-static PyObject* RRType_TXT(s_RRType *self);
-static PyObject* RRType_RRSIG(s_RRType *self);
-static PyObject* RRType_NSEC(s_RRType *self);
-static PyObject* RRType_AAAA(s_RRType *self);
-static PyObject* RRType_DS(s_RRType *self);
-static PyObject* RRType_OPT(s_RRType *self);
-static PyObject* RRType_A(s_RRType *self);
-static PyObject* RRType_NS(s_RRType *self);
-static PyObject* RRType_CNAME(s_RRType *self);
-static PyObject* RRType_SOA(s_RRType *self);
-static PyObject* RRType_NSEC3(s_RRType *self);
-static PyObject* RRType_IXFR(s_RRType *self);
-static PyObject* RRType_AXFR(s_RRType *self);
-static PyObject* RRType_ANY(s_RRType *self);
+PyObject* RRType_str(PyObject* self);
+PyObject* RRType_toWire(s_RRType* self, PyObject* args);
+PyObject* RRType_getCode(s_RRType* self);
+PyObject* RRType_richcmp(s_RRType* self, s_RRType* other, int op);
+PyObject* RRType_NSEC3PARAM(s_RRType *self);
+PyObject* RRType_DNAME(s_RRType *self);
+PyObject* RRType_PTR(s_RRType *self);
+PyObject* RRType_MX(s_RRType *self);
+PyObject* RRType_DNSKEY(s_RRType *self);
+PyObject* RRType_TXT(s_RRType *self);
+PyObject* RRType_RRSIG(s_RRType *self);
+PyObject* RRType_NSEC(s_RRType *self);
+PyObject* RRType_AAAA(s_RRType *self);
+PyObject* RRType_DS(s_RRType *self);
+PyObject* RRType_OPT(s_RRType *self);
+PyObject* RRType_A(s_RRType *self);
+PyObject* RRType_NS(s_RRType *self);
+PyObject* RRType_CNAME(s_RRType *self);
+PyObject* RRType_SOA(s_RRType *self);
+PyObject* RRType_NSEC3(s_RRType *self);
+PyObject* RRType_IXFR(s_RRType *self);
+PyObject* RRType_AXFR(s_RRType *self);
+PyObject* RRType_ANY(s_RRType *self);
+
+typedef CPPPyObjectContainer<s_RRType, RRType> RRTypeContainer;
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -90,7 +77,7 @@ static PyObject* RRType_ANY(s_RRType *self);
 // 2. Our static function here
 // 3. Argument type
 // 4. Documentation
-static PyMethodDef RRType_methods[] = {
+PyMethodDef RRType_methods[] = {
     { "to_text", reinterpret_cast<PyCFunction>(RRType_toText), METH_NOARGS,
       "Returns the string representation" },
     { "to_wire", reinterpret_cast<PyCFunction>(RRType_toWire), METH_VARARGS,
@@ -124,63 +111,7 @@ static PyMethodDef RRType_methods[] = {
     { NULL, NULL, 0, NULL }
 };
 
-// This defines the complete type for reflection in python and
-// parsing of PyObject* to s_RRType
-// Most of the functions are not actually implemented and NULL here.
-static PyTypeObject rrtype_type = {
-    PyVarObject_HEAD_INIT(NULL, 0)
-    "pydnspp.RRType",
-    sizeof(s_RRType),                   // tp_basicsize
-    0,                                  // tp_itemsize
-    (destructor)RRType_destroy,         // tp_dealloc
-    NULL,                               // tp_print
-    NULL,                               // tp_getattr
-    NULL,                               // tp_setattr
-    NULL,                               // tp_reserved
-    NULL,                               // tp_repr
-    NULL,                               // tp_as_number
-    NULL,                               // tp_as_sequence
-    NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
-    NULL,                               // tp_call
-    RRType_str,                         // tp_str
-    NULL,                               // tp_getattro
-    NULL,                               // tp_setattro
-    NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
-    "The RRType class encapsulates DNS resource record types.\n\n"
-    "This class manages the 16-bit integer type codes in quite a straightforward "
-    "way. The only non trivial task is to handle textual representations of "
-    "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
-    NULL,                               // tp_traverse
-    NULL,                               // tp_clear
-    (richcmpfunc)RRType_richcmp,        // tp_richcompare
-    0,                                  // tp_weaklistoffset
-    NULL,                               // tp_iter
-    NULL,                               // tp_iternext
-    RRType_methods,                     // tp_methods
-    NULL,                               // tp_members
-    NULL,                               // tp_getset
-    NULL,                               // tp_base
-    NULL,                               // tp_dict
-    NULL,                               // tp_descr_get
-    NULL,                               // tp_descr_set
-    0,                                  // tp_dictoffset
-    (initproc)RRType_init,              // tp_init
-    NULL,                               // tp_alloc
-    PyType_GenericNew,                  // tp_new
-    NULL,                               // tp_free
-    NULL,                               // tp_is_gc
-    NULL,                               // tp_bases
-    NULL,                               // tp_mro
-    NULL,                               // tp_cache
-    NULL,                               // tp_subclasses
-    NULL,                               // tp_weaklist
-    NULL,                               // tp_del
-    0                                   // tp_version_tag
-};
-
-static int
+int
 RRType_init(s_RRType* self, PyObject* args) {
     const char* s;
     long i;
@@ -194,7 +125,7 @@ RRType_init(s_RRType* self, PyObject* args) {
     // (the way to do exceptions is to set PyErr and return -1)
     try {
         if (PyArg_ParseTuple(args, "s", &s)) {
-            self->rrtype = new RRType(s);
+            self->cppobj = new RRType(s);
             return (0);
         } else if (PyArg_ParseTuple(args, "l", &i)) {
             PyErr_Clear();
@@ -202,7 +133,7 @@ RRType_init(s_RRType* self, PyObject* args) {
                 PyErr_SetString(PyExc_ValueError, "RR Type number out of range");
                 return (-1);
             }
-            self->rrtype = new RRType(i);
+            self->cppobj = new RRType(i);
             return (0);
         } else if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
             Py_ssize_t size = PySequence_Size(bytes);
@@ -212,7 +143,7 @@ RRType_init(s_RRType* self, PyObject* args) {
                 return (result);
             }
             InputBuffer ib(&data[0], size);
-            self->rrtype = new RRType(ib);
+            self->cppobj = new RRType(ib);
             PyErr_Clear();
             return (0);
         }
@@ -236,36 +167,36 @@ RRType_init(s_RRType* self, PyObject* args) {
     return (-1);
 }
 
-static void
+void
 RRType_destroy(s_RRType* self) {
-    delete self->rrtype;
-    self->rrtype = NULL;
+    delete self->cppobj;
+    self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
 }
 
-static PyObject*
+PyObject*
 RRType_toText(s_RRType* self) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->rrtype->toText().c_str()));
+    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
 }
 
-static PyObject*
+PyObject*
 RRType_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self, const_cast<char*>("to_text"),
                                 const_cast<char*>("")));
 }
 
-static PyObject*
+PyObject*
 RRType_toWire(s_RRType* self, PyObject* args) {
     PyObject* bytes;
-    s_MessageRenderer* mr;
+    PyObject* mr;
 
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
 
         OutputBuffer buffer(2);
-        self->rrtype->toWire(buffer);
+        self->cppobj->toWire(buffer);
         PyObject* n = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
         PyObject* result = PySequence_InPlaceConcat(bytes_o, n);
         // We need to release the object we temporarily created here
@@ -273,7 +204,7 @@ RRType_toWire(s_RRType* self, PyObject* args) {
         Py_DECREF(n);
         return (result);
     } else if (PyArg_ParseTuple(args, "O!", &messagerenderer_type, &mr)) {
-        self->rrtype->toWire(*mr->messagerenderer);
+        self->cppobj->toWire(PyMessageRenderer_ToMessageRenderer(mr));
         // If we return NULL it is seen as an error, so use this for
         // None returns
         Py_RETURN_NONE;
@@ -284,12 +215,12 @@ RRType_toWire(s_RRType* self, PyObject* args) {
     return (NULL);
 }
 
-static PyObject*
+PyObject*
 RRType_getCode(s_RRType* self) {
-    return (Py_BuildValue("I", self->rrtype->getCode()));
+    return (Py_BuildValue("I", self->cppobj->getCode()));
 }
 
-static PyObject* 
+PyObject*
 RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
     bool c;
 
@@ -301,24 +232,24 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
 
     switch (op) {
     case Py_LT:
-        c = *self->rrtype < *other->rrtype;
+        c = *self->cppobj < *other->cppobj;
         break;
     case Py_LE:
-        c = *self->rrtype < *other->rrtype ||
-            *self->rrtype == *other->rrtype;
+        c = *self->cppobj < *other->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     case Py_EQ:
-        c = *self->rrtype == *other->rrtype;
+        c = *self->cppobj == *other->cppobj;
         break;
     case Py_NE:
-        c = *self->rrtype != *other->rrtype;
+        c = *self->cppobj != *other->cppobj;
         break;
     case Py_GT:
-        c = *other->rrtype < *self->rrtype;
+        c = *other->cppobj < *self->cppobj;
         break;
     case Py_GE:
-        c = *other->rrtype < *self->rrtype ||
-            *self->rrtype == *other->rrtype;
+        c = *other->cppobj < *self->cppobj ||
+            *self->cppobj == *other->cppobj;
         break;
     default:
         PyErr_SetString(PyExc_IndexError,
@@ -334,131 +265,200 @@ RRType_richcmp(s_RRType* self, s_RRType* other, int op) {
 //
 // Common function for RRType_A/NS/etc.
 //
-static PyObject* RRType_createStatic(RRType stc) {
+PyObject* RRType_createStatic(RRType stc) {
     s_RRType* ret = PyObject_New(s_RRType, &rrtype_type);
     if (ret != NULL) {
-        ret->rrtype = new RRType(stc);
+        ret->cppobj = new RRType(stc);
     }
     return (ret);
 }
 
-static PyObject*
+PyObject*
 RRType_NSEC3PARAM(s_RRType*) {
     return (RRType_createStatic(RRType::NSEC3PARAM()));
 }
 
-static PyObject*
+PyObject*
 RRType_DNAME(s_RRType*) {
     return (RRType_createStatic(RRType::DNAME()));
 }
 
-static PyObject*
+PyObject*
 RRType_PTR(s_RRType*) {
     return (RRType_createStatic(RRType::PTR()));
 }
 
-static PyObject*
+PyObject*
 RRType_MX(s_RRType*) {
     return (RRType_createStatic(RRType::MX()));
 }
 
-static PyObject*
+PyObject*
 RRType_DNSKEY(s_RRType*) {
     return (RRType_createStatic(RRType::DNSKEY()));
 }
 
-static PyObject*
+PyObject*
 RRType_TXT(s_RRType*) {
     return (RRType_createStatic(RRType::TXT()));
 }
 
-static PyObject*
+PyObject*
 RRType_RRSIG(s_RRType*) {
     return (RRType_createStatic(RRType::RRSIG()));
 }
 
-static PyObject*
+PyObject*
 RRType_NSEC(s_RRType*) {
     return (RRType_createStatic(RRType::NSEC()));
 }
 
-static PyObject*
+PyObject*
 RRType_AAAA(s_RRType*) {
     return (RRType_createStatic(RRType::AAAA()));
 }
 
-static PyObject*
+PyObject*
 RRType_DS(s_RRType*) {
     return (RRType_createStatic(RRType::DS()));
 }
 
-static PyObject*
+PyObject*
 RRType_OPT(s_RRType*) {
     return (RRType_createStatic(RRType::OPT()));
 }
 
-static PyObject*
+PyObject*
 RRType_A(s_RRType*) {
     return (RRType_createStatic(RRType::A()));
 }
 
-static PyObject*
+PyObject*
 RRType_NS(s_RRType*) {
     return (RRType_createStatic(RRType::NS()));
 }
 
-static PyObject*
+PyObject*
 RRType_CNAME(s_RRType*) {
     return (RRType_createStatic(RRType::CNAME()));
 }
 
-static PyObject*
+PyObject*
 RRType_SOA(s_RRType*) {
     return (RRType_createStatic(RRType::SOA()));
 }
 
-static PyObject*
+PyObject*
 RRType_NSEC3(s_RRType*) {
     return (RRType_createStatic(RRType::NSEC3()));
 }
 
-static PyObject*
+PyObject*
 RRType_IXFR(s_RRType*) {
     return (RRType_createStatic(RRType::IXFR()));
 }
 
-static PyObject*
+PyObject*
 RRType_AXFR(s_RRType*) {
     return (RRType_createStatic(RRType::AXFR()));
 }
 
-static PyObject*
+PyObject*
 RRType_ANY(s_RRType*) {
     return (RRType_createStatic(RRType::ANY()));
 }
 
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
 
-// end of RRType
+PyObject* po_InvalidRRType;
+PyObject* po_IncompleteRRType;
+
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_RRType
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject rrtype_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.RRType",
+    sizeof(s_RRType),                   // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)RRType_destroy,         // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    RRType_str,                         // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The RRType class encapsulates DNS resource record types.\n\n"
+    "This class manages the 16-bit integer type codes in quite a straightforward "
+    "way. The only non trivial task is to handle textual representations of "
+    "RR types, such as \"A\", \"AAAA\", or \"TYPE65534\".",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)RRType_richcmp,        // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    RRType_methods,                     // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)RRType_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
 
+PyObject*
+createRRTypeObject(const RRType& source) {
+    RRTypeContainer container(PyObject_New(s_RRType, &rrtype_type));
+    container.set(new RRType(source));
+    return (container.release());
+}
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_RRType(PyObject* mod) {
-    // Add the exceptions to the module
-    po_InvalidRRType = PyErr_NewException("pydnspp.InvalidRRType", NULL, NULL);
-    PyModule_AddObject(mod, "InvalidRRType", po_InvalidRRType);
-    po_IncompleteRRType = PyErr_NewException("pydnspp.IncompleteRRType", NULL, NULL);
-    PyModule_AddObject(mod, "IncompleteRRType", po_IncompleteRRType);
-
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&rrtype_type) < 0) {
-        return (false);
+PyRRType_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &rrtype_type));
+}
+
+const RRType&
+PyRRType_ToRRType(const PyObject* rrtype_obj) {
+    if (rrtype_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in RRType PyObject conversion");
     }
-    Py_INCREF(&rrtype_type);
-    PyModule_AddObject(mod, "RRType",
-                       reinterpret_cast<PyObject*>(&rrtype_type));
-    
-    return (true);
+    const s_RRType* rrtype = static_cast<const s_RRType*>(rrtype_obj);
+    return (*rrtype->cppobj);
 }
+
+
+} // end namespace python
+} // end namespace dns
+} // end namespace isc
diff --git a/src/lib/dns/python/rrtype_python.h b/src/lib/dns/python/rrtype_python.h
new file mode 100644
index 0000000..596598e
--- /dev/null
+++ b/src/lib/dns/python/rrtype_python.h
@@ -0,0 +1,68 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_RRTYPE_H
+#define __PYTHON_RRTYPE_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class RRType;
+
+namespace python {
+
+extern PyObject* po_InvalidRRType;
+extern PyObject* po_IncompleteRRType;
+
+extern PyTypeObject rrtype_type;
+
+/// This is a simple shortcut to create a python RRType object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createRRTypeObject(const RRType& source);
+
+/// \brief Checks if the given python object is a RRType object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type RRType, false otherwise
+bool PyRRType_Check(PyObject* obj);
+
+/// \brief Returns a reference to the RRType object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type RRType; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyRRType_Check()
+///
+/// \note This is not a copy; if the RRType is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param rrtype_obj The rrtype object to convert
+const RRType& PyRRType_ToRRType(const PyObject* rrtype_obj);
+
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_RRTYPE_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/serial_python.cc b/src/lib/dns/python/serial_python.cc
new file mode 100644
index 0000000..e2bd809
--- /dev/null
+++ b/src/lib/dns/python/serial_python.cc
@@ -0,0 +1,281 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <dns/serial.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "serial_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_Serial : public PyObject {
+public:
+    s_Serial() : cppobj(NULL) {};
+    isc::dns::Serial* cppobj;
+};
+
+typedef CPPPyObjectContainer<s_Serial, Serial> SerialContainer;
+
+PyObject* Serial_str(PyObject* self);
+PyObject* Serial_getValue(s_Serial* self);
+PyObject* Serial_richcmp(s_Serial* self, s_Serial* other, int op);
+PyObject* Serial_add(PyObject *right, PyObject *left);
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef Serial_methods[] = {
+    { "get_value", reinterpret_cast<PyCFunction>(Serial_getValue), METH_NOARGS,
+      "Returns the Serial value as an integer" },
+    { NULL, NULL, 0, NULL }
+};
+
+// For overriding the + operator. We do not define any other operators for
+// this type.
+PyNumberMethods Serial_NumberMethods = {
+    Serial_add, //nb_add;
+    NULL, //nb_subtract;
+    NULL, //nb_multiply;
+    NULL, //nb_remainder;
+    NULL, //nb_divmod;
+    NULL, //nb_power;
+    NULL, //nb_negative;
+    NULL, //nb_positive;
+    NULL, //nb_absolute;
+    NULL, //nb_bool;
+    NULL, //nb_invert;
+    NULL, //nb_lshift;
+    NULL, //nb_rshift;
+    NULL, //nb_and;
+    NULL, //nb_xor;
+    NULL, //nb_or;
+    NULL, //nb_int;
+    NULL, //nb_reserved;
+    NULL, //nb_float;
+
+    NULL, //nb_inplace_add;
+    NULL, //nb_inplace_subtract;
+    NULL, //nb_inplace_multiply;
+    NULL, //nb_inplace_remainder;
+    NULL, //nb_inplace_power;
+    NULL, //nb_inplace_lshift;
+    NULL, //nb_inplace_rshift;
+    NULL, //nb_inplace_and;
+    NULL, //nb_inplace_xor;
+    NULL, //nb_inplace_or;
+
+    NULL, //nb_floor_divide;
+    NULL, //nb_true_divide;
+    NULL, //nb_inplace_floor_divide;
+    NULL, //nb_inplace_true_divide;
+
+    NULL, //nb_index;
+};
+
+int
+Serial_init(s_Serial* self, PyObject* args) {
+    long long i;
+    if (PyArg_ParseTuple(args, "L", &i)) {
+        PyErr_Clear();
+        if (i < 0 || i > 0xffffffff) {
+            PyErr_SetString(PyExc_ValueError, "Serial number out of range");
+            return (-1);
+        }
+        self->cppobj = new Serial(i);
+        return (0);
+    } else {
+        return (-1);
+    }
+}
+
+void
+Serial_destroy(s_Serial* self) {
+    delete self->cppobj;
+    self->cppobj = NULL;
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+Serial_getValue(s_Serial* self) {
+    return (Py_BuildValue("I", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_str(PyObject* po_self) {
+    const s_Serial* const self = static_cast<s_Serial*>(po_self);
+    return (PyUnicode_FromFormat("%u", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_richcmp(s_Serial* self, s_Serial* other, int op) {
+    bool c = false;
+
+    // Check for null and if the types match. If different type,
+    // simply return False
+    if (!other || (self->ob_type != other->ob_type)) {
+        Py_RETURN_FALSE;
+    }
+
+    switch (op) {
+    case Py_LT:
+        c = *self->cppobj < *other->cppobj;
+        break;
+    case Py_LE:
+        c = *self->cppobj <= *other->cppobj;
+        break;
+    case Py_EQ:
+        c = *self->cppobj == *other->cppobj;
+        break;
+    case Py_NE:
+        c = *self->cppobj != *other->cppobj;
+        break;
+    case Py_GT:
+        c = *self->cppobj > *other->cppobj;
+        break;
+    case Py_GE:
+        c = *self->cppobj >= *other->cppobj;
+        break;
+    }
+    if (c) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+PyObject *
+Serial_add(PyObject *left, PyObject *right) {
+    // Either can be either a serial or a long, as long as one of them is a
+    // serial
+    if (PySerial_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PySerial_ToSerial(right)));
+    } else if (PySerial_Check(left) && PyLong_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PyLong_AsLong(right)));
+    } else if (PyLong_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(right) +
+                                   PyLong_AsLong(left)));
+    } else {
+        Py_INCREF(Py_NotImplemented);
+        return Py_NotImplemented;
+    }
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Serial
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject serial_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Serial",
+    sizeof(s_Serial),                   // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Serial_destroy,         // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    &Serial_NumberMethods,              // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Serial_str,                         // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Serial class encapsulates Serials used in DNS SOA records.\n\n"
+    "This is a straightforward class; a Serial object simply maintains a "
+    "32-bit unsigned integer corresponding to the SOA SERIAL value.  The "
+    "main purpose of this class is to provide serial number arithmetic, as "
+    "described in RFC 1982. Objects of this type can be compared and added "
+    "to each other, as described in RFC 1982. Apart from str(), get_value(), "
+    "comparison operators, and the + operator, no other operations are "
+    "defined for this type.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)Serial_richcmp,        // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Serial_methods,                     // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Serial_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createSerialObject(const Serial& source) {
+    SerialContainer container(PyObject_New(s_Serial, &serial_type));
+    container.set(new Serial(source));
+    return (container.release());
+}
+
+bool
+PySerial_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &serial_type));
+}
+
+const Serial&
+PySerial_ToSerial(const PyObject* serial_obj) {
+    if (serial_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial PyObject conversion");
+    }
+    const s_Serial* serial = static_cast<const s_Serial*>(serial_obj);
+    return (*serial->cppobj);
+}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/serial_python.h b/src/lib/dns/python/serial_python.h
new file mode 100644
index 0000000..48b5199
--- /dev/null
+++ b/src/lib/dns/python/serial_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SERIAL_H
+#define __PYTHON_SERIAL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Serial;
+
+namespace python {
+
+extern PyTypeObject serial_type;
+
+/// This is a simple shortcut to create a python Serial object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createSerialObject(const Serial& source);
+
+/// \brief Checks if the given python object is a Serial object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Serial, false otherwise
+bool PySerial_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Serial object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Serial; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PySerial_Check()
+///
+/// \note This is not a copy; if the Serial is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param Serial_obj The Serial object to convert
+const Serial& PySerial_ToSerial(const PyObject* Serial_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_SERIAL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index 61d7df6..3338727 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -11,6 +11,7 @@ PYTESTS += rrclass_python_test.py
 PYTESTS += rrset_python_test.py
 PYTESTS += rrttl_python_test.py
 PYTESTS += rrtype_python_test.py
+PYTESTS += serial_python_test.py
 PYTESTS += tsig_python_test.py
 PYTESTS += tsig_rdata_python_test.py
 PYTESTS += tsigerror_python_test.py
@@ -24,7 +25,7 @@ EXTRA_DIST += testutil.py
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
diff --git a/src/lib/dns/python/tests/message_python_test.py b/src/lib/dns/python/tests/message_python_test.py
index c731253..86574fb 100644
--- a/src/lib/dns/python/tests/message_python_test.py
+++ b/src/lib/dns/python/tests/message_python_test.py
@@ -17,6 +17,7 @@
 # Tests for the message part of the pydnspp module
 #
 
+import sys
 import unittest
 import os
 from pydnspp import *
@@ -29,9 +30,9 @@ if "TESTDATA_PATH" in os.environ:
 else:
     testdata_path = "../tests/testdata"
 
-def factoryFromFile(message, file):
+def factoryFromFile(message, file, parse_options=Message.PARSE_DEFAULT):
     data = read_wire_data(file)
-    message.from_wire(data)
+    message.from_wire(data, parse_options)
     return data
 
 # we don't have direct comparison for rrsets right now (should we?
@@ -230,6 +231,14 @@ class MessageTest(unittest.TestCase):
         self.assertTrue(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_ANSWER)))
         self.assertEqual(2, self.r.get_rr_count(Message.SECTION_ANSWER))
 
+        # We always make a new deep copy in get_section(), so the reference
+        # count of the returned list and its each item should be 1; otherwise
+        # they would leak.
+        self.assertEqual(1, sys.getrefcount(self.r.get_section(
+                    Message.SECTION_ANSWER)))
+        self.assertEqual(1, sys.getrefcount(self.r.get_section(
+                    Message.SECTION_ANSWER)[0]))
+
         self.assertFalse(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_AUTHORITY)))
         self.assertEqual(0, self.r.get_rr_count(Message.SECTION_AUTHORITY))
         self.r.add_rrset(Message.SECTION_AUTHORITY, self.rrset_a)
@@ -242,7 +251,7 @@ class MessageTest(unittest.TestCase):
         self.assertTrue(compare_rrset_list(section_rrset, self.r.get_section(Message.SECTION_ADDITIONAL)))
         self.assertEqual(2, self.r.get_rr_count(Message.SECTION_ADDITIONAL))
 
-    def test_add_question(self):
+    def test_add_and_get_question(self):
         self.assertRaises(TypeError, self.r.add_question, "wrong", "wrong")
         q = Question(Name("example.com"), RRClass("IN"), RRType("A"))
         qs = [q]
@@ -252,6 +261,12 @@ class MessageTest(unittest.TestCase):
         self.assertTrue(compare_rrset_list(qs, self.r.get_question()))
         self.assertEqual(1, self.r.get_rr_count(Message.SECTION_QUESTION))
 
+        # We always make a new deep copy in get_section(), so the reference
+        # count of the returned list and its each item should be 1; otherwise
+        # they would leak.
+        self.assertEqual(1, sys.getrefcount(self.r.get_question()))
+        self.assertEqual(1, sys.getrefcount(self.r.get_question()[0]))
+
     def test_add_rrset(self):
         self.assertRaises(TypeError, self.r.add_rrset, "wrong")
         self.assertRaises(TypeError, self.r.add_rrset)
@@ -466,6 +481,54 @@ test.example.com. 3600 IN A 192.0.2.2
         self.assertEqual("192.0.2.2", rdata[1].to_text())
         self.assertEqual(2, len(rdata))
 
+    def test_from_wire_short_buffer(self):
+        data = read_wire_data("message_fromWire22.wire")
+        self.assertRaises(DNSMessageFORMERR, self.p.from_wire, data[:-1])
+
+    def test_from_wire_combind_rrs(self):
+        factoryFromFile(self.p, "message_fromWire19.wire")
+        rrset = self.p.get_section(Message.SECTION_ANSWER)[0]
+        self.assertEqual(RRType("A"), rrset.get_type())
+        self.assertEqual(2, len(rrset.get_rdata()))
+
+        rrset = self.p.get_section(Message.SECTION_ANSWER)[1]
+        self.assertEqual(RRType("AAAA"), rrset.get_type())
+        self.assertEqual(1, len(rrset.get_rdata()))
+
+    def check_preserve_rrs(self, message, section):
+        rrset = message.get_section(section)[0]
+        self.assertEqual(RRType("A"), rrset.get_type())
+        rdata = rrset.get_rdata()
+        self.assertEqual(1, len(rdata))
+        self.assertEqual('192.0.2.1', rdata[0].to_text())
+
+        rrset = message.get_section(section)[1]
+        self.assertEqual(RRType("AAAA"), rrset.get_type())
+        rdata = rrset.get_rdata()
+        self.assertEqual(1, len(rdata))
+        self.assertEqual('2001:db8::1', rdata[0].to_text())
+
+        rrset = message.get_section(section)[2]
+        self.assertEqual(RRType("A"), rrset.get_type())
+        rdata = rrset.get_rdata()
+        self.assertEqual(1, len(rdata))
+        self.assertEqual('192.0.2.2', rdata[0].to_text())
+
+    def test_from_wire_preserve_answer(self):
+        factoryFromFile(self.p, "message_fromWire19.wire",
+                        Message.PRESERVE_ORDER)
+        self.check_preserve_rrs(self.p, Message.SECTION_ANSWER)
+
+    def test_from_wire_preserve_authority(self):
+        factoryFromFile(self.p, "message_fromWire20.wire",
+                        Message.PRESERVE_ORDER)
+        self.check_preserve_rrs(self.p, Message.SECTION_AUTHORITY)
+
+    def test_from_wire_preserve_additional(self):
+        factoryFromFile(self.p, "message_fromWire21.wire",
+                        Message.PRESERVE_ORDER)
+        self.check_preserve_rrs(self.p, Message.SECTION_ADDITIONAL)
+
     def test_EDNS0ExtCode(self):
         # Extended Rcode = BADVERS
         message_parse = Message(Message.PARSE)
diff --git a/src/lib/dns/python/tests/name_python_test.py b/src/lib/dns/python/tests/name_python_test.py
index b8e625a..5263412 100644
--- a/src/lib/dns/python/tests/name_python_test.py
+++ b/src/lib/dns/python/tests/name_python_test.py
@@ -121,6 +121,15 @@ class NameTest(unittest.TestCase):
         self.assertEqual(".", str(self.name2))
         self.assertEqual("something.completely.different.", self.name3.to_text())
 
+        self.assertEqual("example.com.", self.name1.to_text(False))
+        self.assertEqual("example.com", self.name1.to_text(True))
+
+        # make sure it does not behave unexpectedly on wrong arguments
+        self.assertRaises(TypeError, self.name1.to_text, True, 1)
+        self.assertRaises(TypeError, self.name1.to_text, 1)
+        self.assertRaises(TypeError, self.name1.to_text, [])
+        self.assertRaises(TypeError, self.name1.to_text, "foo")
+
     def test_to_wire(self):
         b1 = bytearray()
         self.name1.to_wire(b1)
diff --git a/src/lib/dns/python/tests/rdata_python_test.py b/src/lib/dns/python/tests/rdata_python_test.py
index 776f792..81dea5f 100644
--- a/src/lib/dns/python/tests/rdata_python_test.py
+++ b/src/lib/dns/python/tests/rdata_python_test.py
@@ -35,6 +35,14 @@ class RdataTest(unittest.TestCase):
         self.assertRaises(TypeError, Rdata, "wrong", RRClass("IN"), "192.0.2.99")
         self.assertRaises(TypeError, Rdata, RRType("A"), "wrong", "192.0.2.99")
         self.assertRaises(TypeError, Rdata, RRType("A"), RRClass("IN"), 1)
+        self.assertRaises(InvalidRdataText, Rdata, RRType("A"), RRClass("IN"),
+                          "Invalid Rdata Text")
+        self.assertRaises(CharStringTooLong, Rdata, RRType("TXT"),
+                          RRClass("IN"), ' ' * 256)
+        self.assertRaises(InvalidRdataLength, Rdata, RRType("TXT"),
+                          RRClass("IN"), bytes(65536))
+        self.assertRaises(DNSMessageFORMERR, Rdata, RRType("TXT"),
+                          RRClass("IN"), b"\xff")
 
     def test_rdata_to_wire(self):
         b = bytearray()
diff --git a/src/lib/dns/python/tests/rrset_python_test.py b/src/lib/dns/python/tests/rrset_python_test.py
index e0eab4a..de475a7 100644
--- a/src/lib/dns/python/tests/rrset_python_test.py
+++ b/src/lib/dns/python/tests/rrset_python_test.py
@@ -17,6 +17,7 @@
 # Tests for the rrtype part of the pydnspp module
 #
 
+import sys
 import unittest
 import os
 from pydnspp import *
@@ -110,6 +111,12 @@ class TestModuleSpec(unittest.TestCase):
                 ]
         self.assertEqual(rdata, self.rrset_a.get_rdata())
         self.assertEqual([], self.rrset_a_empty.get_rdata())
+
+        # We always make a new deep copy in get_rdata(), so the reference
+        # count of the returned list and its each item should be 1; otherwise
+        # they would leak.
+        self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()))
+        self.assertEqual(1, sys.getrefcount(self.rrset_a.get_rdata()[0]))
         
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/lib/dns/python/tests/serial_python_test.py b/src/lib/dns/python/tests/serial_python_test.py
new file mode 100644
index 0000000..0ca08c2
--- /dev/null
+++ b/src/lib/dns/python/tests/serial_python_test.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Tests for the serial part of the pydnspp module
+#
+
+import unittest
+import os
+from pydnspp import *
+
+class SerialTest(unittest.TestCase):
+    def setUp(self):
+        self.one = Serial(1)
+        self.one_2 = Serial(1)
+        self.two = Serial(2)
+        self.date_zero = Serial(1980120100)
+        self.date_one = Serial(1980120101)
+        self.zero = Serial(0)
+        self.highest = Serial(4294967295)
+        self.number_low = Serial(12345)
+        self.number_medium = Serial(2000000000)
+        self.number_high = Serial(4000000000)
+
+    def test_init(self):
+        self.assertRaises(ValueError, Serial, -1)
+        self.assertRaises(ValueError, Serial, 4294967296)
+        self.assertRaises(ValueError, Serial, 4294967297)
+        self.assertRaises(ValueError, Serial, 100000000000)
+
+    def test_get_value(self):
+        self.assertEqual(1, self.one.get_value())
+        self.assertNotEqual(2, self.one_2.get_value())
+        self.assertEqual(2, self.two.get_value())
+        self.assertEqual(1980120100, self.date_zero.get_value())
+        self.assertEqual(1980120101, self.date_one.get_value())
+        self.assertEqual(0, self.zero.get_value())
+        self.assertEqual(4294967295, self.highest.get_value())
+        self.assertEqual(12345, self.number_low.get_value())
+        self.assertEqual(2000000000, self.number_medium.get_value())
+        self.assertEqual(4000000000, self.number_high.get_value())
+
+    def test_str(self):
+        self.assertEqual('1', str(self.one))
+        self.assertNotEqual('2', str(self.one_2))
+        self.assertEqual('2', str(self.two))
+        self.assertEqual('1980120100', str(self.date_zero))
+        self.assertEqual('1980120101', str(self.date_one))
+        self.assertEqual('0', str(self.zero))
+        self.assertEqual('4294967295', str(self.highest))
+        self.assertEqual('12345', str(self.number_low))
+        self.assertEqual('2000000000', str(self.number_medium))
+        self.assertEqual('4000000000', str(self.number_high))
+
+    def test_equals(self):
+        self.assertEqual(self.one, self.one)
+        self.assertEqual(self.one, self.one_2)
+        self.assertNotEqual(self.one, self.two)
+        self.assertNotEqual(self.two, self.one)
+        self.assertEqual(Serial(12345), self.number_low)
+        self.assertNotEqual(Serial(12346), self.number_low)
+
+    def test_compare(self):
+        # These should be true/false even without serial arithmetic
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.one_2)
+        self.assertLess(self.one, self.two)
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.two)
+        self.assertGreater(self.two, self.one)
+        self.assertGreaterEqual(self.two, self.two)
+        self.assertGreaterEqual(self.two, self.one)
+        self.assertLess(self.one, self.number_low)
+        self.assertLess(self.number_low, self.number_medium)
+        self.assertLess(self.number_medium, self.number_high)
+
+        # These should 'wrap'
+        self.assertGreater(self.zero, self.highest)
+        self.assertLess(self.highest, self.one)
+        self.assertLess(self.number_high, self.number_low)
+
+    def test_addition(self):
+        self.assertEqual(self.two, self.one + self.one)
+        self.assertEqual(self.two, self.one + self.one_2)
+        self.assertEqual(self.highest, self.highest + self.zero)
+        self.assertEqual(self.zero, self.highest + self.one)
+        self.assertEqual(self.one, self.highest + self.two)
+        self.assertEqual(self.one, self.highest + self.one + self.one)
+        self.assertEqual(self.one + 100, self.highest + 102)
+        self.assertEqual(100 + self.one, self.highest + 102)
+        self.assertEqual(self.zero + 2147483645, self.highest + 2147483646)
+
+        # using lambda so the error doesn't get thrown on initial evaluation
+        self.assertRaises(TypeError, lambda: self.zero + "bad")
+        self.assertRaises(TypeError, lambda: self.zero + None)
+        self.assertRaises(TypeError, lambda: "bad" + self.zero)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/lib/dns/python/tsig_python.cc b/src/lib/dns/python/tsig_python.cc
index db93a08..0764e33 100644
--- a/src/lib/dns/python/tsig_python.cc
+++ b/src/lib/dns/python/tsig_python.cc
@@ -37,23 +37,18 @@ using namespace isc::util::python;
 using namespace isc::dns;
 using namespace isc::dns::python;
 
-//
-// Definition of the classes
-//
-
 // For each class, we need a struct, a helper functions (init, destroy,
 // and static wrappers around the methods we export), a list of methods,
 // and a type description
 
-//
-// TSIGContext
-//
-
-// Trivial constructor.
-s_TSIGContext::s_TSIGContext() : cppobj(NULL) {
-}
-
 namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGContext : public PyObject {
+public:
+    s_TSIGContext() : cppobj(NULL) {};
+    TSIGContext* cppobj;
+};
+
 // Shortcut type which would be convenient for adding class variables safely.
 typedef CPPPyObjectContainer<s_TSIGContext, TSIGContext> TSIGContextContainer;
 
@@ -101,23 +96,23 @@ int
 TSIGContext_init(s_TSIGContext* self, PyObject* args) {
     try {
         // "From key" constructor
-        const s_TSIGKey* tsigkey_obj;
+        const PyObject* tsigkey_obj;
         if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey_obj)) {
-            self->cppobj = new TSIGContext(*tsigkey_obj->cppobj);
+            self->cppobj = new TSIGContext(PyTSIGKey_ToTSIGKey(tsigkey_obj));
             return (0);
         }
 
         // "From key param + keyring" constructor
         PyErr_Clear();
-        const s_Name* keyname_obj;
-        const s_Name* algname_obj;
-        const s_TSIGKeyRing* keyring_obj;
+        const PyObject* keyname_obj;
+        const PyObject* algname_obj;
+        const PyObject* keyring_obj;
         if (PyArg_ParseTuple(args, "O!O!O!", &name_type, &keyname_obj,
                              &name_type, &algname_obj, &tsigkeyring_type,
                              &keyring_obj)) {
-            self->cppobj = new TSIGContext(*keyname_obj->cppobj,
-                                           *algname_obj->cppobj,
-                                           *keyring_obj->cppobj);
+            self->cppobj = new TSIGContext(PyName_ToName(keyname_obj),
+                                           PyName_ToName(algname_obj),
+                                           PyTSIGKeyRing_ToTSIGKeyRing(keyring_obj));
             return (0);
         }
     } catch (const exception& ex) {
@@ -153,7 +148,7 @@ PyObject*
 TSIGContext_getError(s_TSIGContext* self) {
     try {
         PyObjectContainer container(createTSIGErrorObject(
-                                        self->cppobj->getError()));
+                                    self->cppobj->getError()));
         return (Py_BuildValue("O", container.get()));
     } catch (const exception& ex) {
         const string ex_what =
@@ -205,13 +200,13 @@ PyObject*
 TSIGContext_verify(s_TSIGContext* self, PyObject* args) {
     const char* data;
     Py_ssize_t data_len;
-    s_TSIGRecord* py_record;
+    PyObject* py_record;
     PyObject* py_maybe_none;
-    TSIGRecord* record;
+    const TSIGRecord* record;
 
     if (PyArg_ParseTuple(args, "O!y#", &tsigrecord_type, &py_record,
                          &data, &data_len)) {
-        record = py_record->cppobj;
+        record = &PyTSIGRecord_ToTSIGRecord(py_record);
     } else if (PyArg_ParseTuple(args, "Oy#", &py_maybe_none, &data,
                                 &data_len)) {
         record = NULL;
@@ -264,7 +259,7 @@ PyTypeObject tsigcontext_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     NULL,                               // tp_str
     NULL,                               // tp_getattro
@@ -307,58 +302,24 @@ PyTypeObject tsigcontext_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_TSIGContext(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&tsigcontext_type) < 0) {
-        return (false);
+PyTSIGContext_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    void* p = &tsigcontext_type;
-    if (PyModule_AddObject(mod, "TSIGContext",
-                           static_cast<PyObject*>(p)) < 0) {
-        return (false);
-    }
-    Py_INCREF(&tsigcontext_type);
+    return (PyObject_TypeCheck(obj, &tsigcontext_type));
+}
 
-    try {
-        // Class specific exceptions
-        po_TSIGContextError = PyErr_NewException("pydnspp.TSIGContextError",
-                                                 po_IscException, NULL);
-        PyObjectContainer(po_TSIGContextError).installToModule(
-            mod, "TSIGContextError");
-
-        // Constant class variables
-        installClassVariable(tsigcontext_type, "STATE_INIT",
-                             Py_BuildValue("I", TSIGContext::INIT));
-        installClassVariable(tsigcontext_type, "STATE_SENT_REQUEST",
-                             Py_BuildValue("I", TSIGContext::SENT_REQUEST));
-        installClassVariable(tsigcontext_type, "STATE_RECEIVED_REQUEST",
-                             Py_BuildValue("I", TSIGContext::RECEIVED_REQUEST));
-        installClassVariable(tsigcontext_type, "STATE_SENT_RESPONSE",
-                             Py_BuildValue("I", TSIGContext::SENT_RESPONSE));
-        installClassVariable(tsigcontext_type, "STATE_VERIFIED_RESPONSE",
-                             Py_BuildValue("I",
-                                           TSIGContext::VERIFIED_RESPONSE));
-
-        installClassVariable(tsigcontext_type, "DEFAULT_FUDGE",
-                             Py_BuildValue("H", TSIGContext::DEFAULT_FUDGE));
-    } catch (const exception& ex) {
-        const string ex_what =
-            "Unexpected failure in TSIGContext initialization: " +
-            string(ex.what());
-        PyErr_SetString(po_IscException, ex_what.c_str());
-        return (false);
-    } catch (...) {
-        PyErr_SetString(PyExc_SystemError,
-                        "Unexpected failure in TSIGContext initialization");
-        return (false);
+TSIGContext&
+PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj) {
+    if (tsigcontext_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in TSIGContext PyObject conversion");
     }
-
-    return (true);
+    s_TSIGContext* tsigcontext = static_cast<s_TSIGContext*>(tsigcontext_obj);
+    return (*tsigcontext->cppobj);
 }
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsig_python.h b/src/lib/dns/python/tsig_python.h
index f9b4f7b..e4e9fff 100644
--- a/src/lib/dns/python/tsig_python.h
+++ b/src/lib/dns/python/tsig_python.h
@@ -23,19 +23,31 @@ class TSIGContext;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGContext : public PyObject {
-public:
-    s_TSIGContext();
-    TSIGContext* cppobj;
-};
-
 extern PyTypeObject tsigcontext_type;
 
 // Class specific exceptions
 extern PyObject* po_TSIGContextError;
 
-bool initModulePart_TSIGContext(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGContext object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGContext, false otherwise
+bool PyTSIGContext_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGContext object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type TSIGContext; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyTSIGContext_Check()
+///
+/// \note This is not a copy; if the TSIGContext is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigcontext_obj The tsigcontext object to convert
+TSIGContext& PyTSIGContext_ToTSIGContext(PyObject* tsigcontext_obj);
+
 
 } // namespace python
 } // namespace dns
diff --git a/src/lib/dns/python/tsig_rdata_python.cc b/src/lib/dns/python/tsig_rdata_python.cc
index 4e4f287..6ec0f09 100644
--- a/src/lib/dns/python/tsig_rdata_python.cc
+++ b/src/lib/dns/python/tsig_rdata_python.cc
@@ -12,6 +12,7 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#define PY_SSIZE_T_CLEAN
 #include <Python.h>
 
 #include <string>
@@ -32,23 +33,19 @@ using namespace isc::dns;
 using namespace isc::dns::rdata;
 using namespace isc::dns::python;
 
-//
-// Definition of the classes
-//
-
 // For each class, we need a struct, helper functions (init, destroy,
 // and static wrappers around the methods we export), a list of methods,
 // and a type description
 
-//
-// TSIG RDATA
-//
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIG : public PyObject {
+public:
+    s_TSIG() : cppobj(NULL) {};
+    const rdata::any::TSIG* cppobj;
+};
 
-// Trivial constructor.
-s_TSIG::s_TSIG() : cppobj(NULL) {
-}
 
-namespace {
 // Shortcut type which would be convenient for adding class variables safely.
 typedef CPPPyObjectContainer<s_TSIG, any::TSIG> TSIGContainer;
 
@@ -235,7 +232,7 @@ TSIG_toWire(const s_TSIG* const self, PyObject* args) {
                 self, args));
 }
 
-PyObject* 
+PyObject*
 TSIG_richcmp(const s_TSIG* const self,
                    const s_TSIG* const other,
                    const int op)
@@ -302,7 +299,7 @@ PyTypeObject tsig_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     TSIG_str,                       // tp_str
     NULL,                               // tp_getattro
@@ -340,30 +337,31 @@ PyTypeObject tsig_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIG(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&tsig_type) < 0) {
-        return (false);
-    }
-    void* p = &tsig_type;
-    if (PyModule_AddObject(mod, "TSIG", static_cast<PyObject*>(p)) < 0) {
-        return (false);
-    }
-    Py_INCREF(&tsig_type);
-
-    return (true);
-}
-
 PyObject*
 createTSIGObject(const any::TSIG& source) {
-    TSIGContainer container = PyObject_New(s_TSIG, &tsig_type);
+    TSIGContainer container(PyObject_New(s_TSIG, &tsig_type));
     container.set(new any::TSIG(source));
     return (container.release());
 }
+
+bool
+PyTSIG_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &tsig_type));
+}
+
+const any::TSIG&
+PyTSIG_ToTSIG(const PyObject* tsig_obj) {
+    if (tsig_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in TSIG PyObject conversion");
+    }
+    const s_TSIG* tsig = static_cast<const s_TSIG*>(tsig_obj);
+    return (*tsig->cppobj);
+}
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsig_rdata_python.h b/src/lib/dns/python/tsig_rdata_python.h
index e5e0c6c..a84d9e8 100644
--- a/src/lib/dns/python/tsig_rdata_python.h
+++ b/src/lib/dns/python/tsig_rdata_python.h
@@ -27,17 +27,8 @@ class TSIG;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object
-class s_TSIG : public PyObject {
-public:
-    s_TSIG();
-    const rdata::any::TSIG* cppobj;
-};
-
 extern PyTypeObject tsig_type;
 
-bool initModulePart_TSIG(PyObject* mod);
-
 /// This is a simple shortcut to create a python TSIG object (in the
 /// form of a pointer to PyObject) with minimal exception safety.
 /// On success, it returns a valid pointer to PyObject with a reference
@@ -47,6 +38,26 @@ bool initModulePart_TSIG(PyObject* mod);
 /// followed by necessary setup for python exception.
 PyObject* createTSIGObject(const rdata::any::TSIG& source);
 
+/// \brief Checks if the given python object is a TSIG object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIG, false otherwise
+bool PyTSIG_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIG object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type TSIG; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyTSIG_Check()
+///
+/// \note This is not a copy; if the TSIG is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsig_obj The tsig object to convert
+const rdata::any::TSIG& PyTSIG_ToTSIG(const PyObject* tsig_obj);
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsigerror_python.cc b/src/lib/dns/python/tsigerror_python.cc
index 0ad4716..7a0217e 100644
--- a/src/lib/dns/python/tsigerror_python.cc
+++ b/src/lib/dns/python/tsigerror_python.cc
@@ -30,26 +30,21 @@ using namespace isc::util::python;
 using namespace isc::dns;
 using namespace isc::dns::python;
 
-//
-// Definition of the classes
-//
-
 // For each class, we need a struct, helper functions (init, destroy,
 // and static wrappers around the methods we export), a list of methods,
 // and a type description
 
-//
-// TSIGError
-//
-
-// Trivial constructor.
-s_TSIGError::s_TSIGError() : cppobj(NULL) {
-}
-
 // Import pydoc text
 #include "tsigerror_python_inc.cc"
 
 namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGError : public PyObject {
+public:
+    s_TSIGError() : cppobj(NULL) {};
+    const TSIGError* cppobj;
+};
+
 // Shortcut type which would be convenient for adding class variables safely.
 typedef CPPPyObjectContainer<s_TSIGError, TSIGError> TSIGErrorContainer;
 
@@ -107,9 +102,9 @@ TSIGError_init(s_TSIGError* self, PyObject* args) {
 
         // Constructor from Rcode
         PyErr_Clear();
-        s_Rcode* py_rcode;
+        PyObject* py_rcode;
         if (PyArg_ParseTuple(args, "O!", &rcode_type, &py_rcode)) {
-            self->cppobj = new TSIGError(*py_rcode->cppobj);
+            self->cppobj = new TSIGError(PyRcode_ToRcode(py_rcode));
             return (0);
         }
     } catch (const isc::OutOfRange& ex) {
@@ -172,13 +167,8 @@ TSIGError_str(PyObject* self) {
 
 PyObject*
 TSIGError_toRcode(const s_TSIGError* const self) {
-    typedef CPPPyObjectContainer<s_Rcode, Rcode> RcodePyObjectContainer;
-
     try {
-        RcodePyObjectContainer rcode_container(PyObject_New(s_Rcode,
-                                                            &rcode_type));
-        rcode_container.set(new Rcode(self->cppobj->toRcode()));
-        return (rcode_container.release());
+        return (createRcodeObject(self->cppobj->toRcode()));
     } catch (const exception& ex) {
         const string ex_what =
             "Failed to convert TSIGError to Rcode: " + string(ex.what());
@@ -190,7 +180,7 @@ TSIGError_toRcode(const s_TSIGError* const self) {
     return (NULL);
 }
 
-PyObject* 
+PyObject*
 TSIGError_richcmp(const s_TSIGError* const self,
                    const s_TSIGError* const other,
                    const int op)
@@ -252,7 +242,7 @@ PyTypeObject tsigerror_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     // THIS MAY HAVE TO BE CHANGED TO NULL:
     TSIGError_str,                       // tp_str
@@ -290,78 +280,9 @@ PyTypeObject tsigerror_type = {
     0                                   // tp_version_tag
 };
 
-namespace {
-// Trivial shortcut to create and install TSIGError constants.
-inline void
-installTSIGErrorConstant(const char* name, const TSIGError& val) {
-    TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
-    container.installAsClassVariable(tsigerror_type, name, new TSIGError(val));
-}
-}
-
-// Module Initialization, all statics are initialized here
-bool
-initModulePart_TSIGError(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&tsigerror_type) < 0) {
-        return (false);
-    }
-    void* p = &tsigerror_type;
-    if (PyModule_AddObject(mod, "TSIGError", static_cast<PyObject*>(p)) < 0) {
-        return (false);
-    }
-    Py_INCREF(&tsigerror_type);
-
-    try {
-        // Constant class variables
-        // Error codes (bare values)
-        installClassVariable(tsigerror_type, "BAD_SIG_CODE",
-                             Py_BuildValue("H", TSIGError::BAD_SIG_CODE));
-        installClassVariable(tsigerror_type, "BAD_KEY_CODE",
-                             Py_BuildValue("H", TSIGError::BAD_KEY_CODE));
-        installClassVariable(tsigerror_type, "BAD_TIME_CODE",
-                             Py_BuildValue("H", TSIGError::BAD_TIME_CODE));
-
-        // Error codes (constant objects)
-        installTSIGErrorConstant("NOERROR", TSIGError::NOERROR());
-        installTSIGErrorConstant("FORMERR", TSIGError::FORMERR());
-        installTSIGErrorConstant("SERVFAIL", TSIGError::SERVFAIL());
-        installTSIGErrorConstant("NXDOMAIN", TSIGError::NXDOMAIN());
-        installTSIGErrorConstant("NOTIMP", TSIGError::NOTIMP());
-        installTSIGErrorConstant("REFUSED", TSIGError::REFUSED());
-        installTSIGErrorConstant("YXDOMAIN", TSIGError::YXDOMAIN());
-        installTSIGErrorConstant("YXRRSET", TSIGError::YXRRSET());
-        installTSIGErrorConstant("NXRRSET", TSIGError::NXRRSET());
-        installTSIGErrorConstant("NOTAUTH", TSIGError::NOTAUTH());
-        installTSIGErrorConstant("NOTZONE", TSIGError::NOTZONE());
-        installTSIGErrorConstant("RESERVED11", TSIGError::RESERVED11());
-        installTSIGErrorConstant("RESERVED12", TSIGError::RESERVED12());
-        installTSIGErrorConstant("RESERVED13", TSIGError::RESERVED13());
-        installTSIGErrorConstant("RESERVED14", TSIGError::RESERVED14());
-        installTSIGErrorConstant("RESERVED15", TSIGError::RESERVED15());
-        installTSIGErrorConstant("BAD_SIG", TSIGError::BAD_SIG());
-        installTSIGErrorConstant("BAD_KEY", TSIGError::BAD_KEY());
-        installTSIGErrorConstant("BAD_TIME", TSIGError::BAD_TIME());
-    } catch (const exception& ex) {
-        const string ex_what =
-            "Unexpected failure in TSIGError initialization: " +
-            string(ex.what());
-        PyErr_SetString(po_IscException, ex_what.c_str());
-        return (false);
-    } catch (...) {
-        PyErr_SetString(PyExc_SystemError,
-                        "Unexpected failure in TSIGError initialization");
-        return (false);
-    }
-
-    return (true);
-}
-
 PyObject*
 createTSIGErrorObject(const TSIGError& source) {
-    TSIGErrorContainer container = PyObject_New(s_TSIGError, &tsigerror_type);
+    TSIGErrorContainer container(PyObject_New(s_TSIGError, &tsigerror_type));
     container.set(new TSIGError(source));
     return (container.release());
 }
diff --git a/src/lib/dns/python/tsigerror_python.h b/src/lib/dns/python/tsigerror_python.h
index 735a480..0b5b630 100644
--- a/src/lib/dns/python/tsigerror_python.h
+++ b/src/lib/dns/python/tsigerror_python.h
@@ -23,17 +23,8 @@ class TSIGError;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGError : public PyObject {
-public:
-    s_TSIGError();
-    const TSIGError* cppobj;
-};
-
 extern PyTypeObject tsigerror_type;
 
-bool initModulePart_TSIGError(PyObject* mod);
-
 /// This is a simple shortcut to create a python TSIGError object (in the
 /// form of a pointer to PyObject) with minimal exception safety.
 /// On success, it returns a valid pointer to PyObject with a reference
@@ -42,6 +33,7 @@ bool initModulePart_TSIGError(PyObject* mod);
 /// This function is expected to be called within a try block
 /// followed by necessary setup for python exception.
 PyObject* createTSIGErrorObject(const TSIGError& source);
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.cc b/src/lib/dns/python/tsigkey_python.cc
index f0906cb..cf79c1a 100644
--- a/src/lib/dns/python/tsigkey_python.cc
+++ b/src/lib/dns/python/tsigkey_python.cc
@@ -31,10 +31,6 @@ using namespace isc::util::python;
 using namespace isc::dns;
 using namespace isc::dns::python;
 
-//
-// Definition of the classes
-//
-
 // For each class, we need a struct, helper functions (init, destroy,
 // and static wrappers around the methods we export), a list of methods,
 // and a type description
@@ -43,11 +39,14 @@ using namespace isc::dns::python;
 // TSIGKey
 //
 
+namespace {
 // The s_* Class simply covers one instantiation of the object
+class s_TSIGKey : public PyObject {
+public:
+    s_TSIGKey() : cppobj(NULL) {};
+    TSIGKey* cppobj;
+};
 
-s_TSIGKey::s_TSIGKey() : cppobj(NULL) {}
-
-namespace {
 //
 // We declare the functions here, the definitions are below
 // the type definition of the object, since both can use the other
@@ -96,8 +95,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
         }
 
         PyErr_Clear();
-        const s_Name* key_name;
-        const s_Name* algorithm_name;
+        const PyObject* key_name;
+        const PyObject* algorithm_name;
         PyObject* bytes_obj;
         const char* secret;
         Py_ssize_t secret_len;
@@ -107,8 +106,8 @@ TSIGKey_init(s_TSIGKey* self, PyObject* args) {
             if (secret_len == 0) {
                 secret = NULL;
             }
-            self->cppobj = new TSIGKey(*key_name->cppobj,
-                                       *algorithm_name->cppobj,
+            self->cppobj = new TSIGKey(PyName_ToName(key_name),
+                                       PyName_ToName(algorithm_name),
                                        secret, secret_len);
             return (0);
         }
@@ -196,7 +195,7 @@ PyTypeObject tsigkey_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     NULL,                               // tp_str
     NULL,                               // tp_getattro
@@ -233,49 +232,20 @@ PyTypeObject tsigkey_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
 bool
-initModulePart_TSIGKey(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&tsigkey_type) < 0) {
-        return (false);
-    }
-    void* p = &tsigkey_type;
-    if (PyModule_AddObject(mod, "TSIGKey", static_cast<PyObject*>(p)) != 0) {
-        return (false);
-    }
-    Py_INCREF(&tsigkey_type);
-
-    try {
-        // Constant class variables
-        installClassVariable(tsigkey_type, "HMACMD5_NAME",
-                             createNameObject(TSIGKey::HMACMD5_NAME()));
-        installClassVariable(tsigkey_type, "HMACSHA1_NAME",
-                             createNameObject(TSIGKey::HMACSHA1_NAME()));
-        installClassVariable(tsigkey_type, "HMACSHA256_NAME",
-                             createNameObject(TSIGKey::HMACSHA256_NAME()));
-        installClassVariable(tsigkey_type, "HMACSHA224_NAME",
-                             createNameObject(TSIGKey::HMACSHA224_NAME()));
-        installClassVariable(tsigkey_type, "HMACSHA384_NAME",
-                             createNameObject(TSIGKey::HMACSHA384_NAME()));
-        installClassVariable(tsigkey_type, "HMACSHA512_NAME",
-                             createNameObject(TSIGKey::HMACSHA512_NAME()));
-    } catch (const exception& ex) {
-        const string ex_what =
-            "Unexpected failure in TSIGKey initialization: " +
-            string(ex.what());
-        PyErr_SetString(po_IscException, ex_what.c_str());
-        return (false);
-    } catch (...) {
-        PyErr_SetString(PyExc_SystemError,
-                        "Unexpected failure in TSIGKey initialization");
-        return (false);
+PyTSIGKey_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
+    return (PyObject_TypeCheck(obj, &tsigkey_type));
+}
 
-    return (true);
+const TSIGKey&
+PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj) {
+    const s_TSIGKey* tsigkey = static_cast<const s_TSIGKey*>(tsigkey_obj);
+    return (*tsigkey->cppobj);
 }
+
 } // namespace python
 } // namespace dns
 } // namespace isc
@@ -287,13 +257,14 @@ initModulePart_TSIGKey(PyObject* mod) {
 // TSIGKeyRing
 //
 
+namespace {
 // The s_* Class simply covers one instantiation of the object
+class s_TSIGKeyRing : public PyObject {
+public:
+    s_TSIGKeyRing() : cppobj(NULL) {};
+    TSIGKeyRing* cppobj;
+};
 
-// The s_* Class simply covers one instantiation of the object
-
-s_TSIGKeyRing::s_TSIGKeyRing() : cppobj(NULL) {}
-
-namespace {
 //
 // We declare the functions here, the definitions are below
 // the type definition of the object, since both can use the other
@@ -329,7 +300,7 @@ TSIGKeyRing_init(s_TSIGKeyRing* self, PyObject* args) {
                         "Invalid arguments to TSIGKeyRing constructor");
         return (-1);
     }
-    
+
     self->cppobj = new(nothrow) TSIGKeyRing();
     if (self->cppobj == NULL) {
         PyErr_SetString(po_IscException, "Allocating TSIGKeyRing failed");
@@ -354,7 +325,7 @@ TSIGKeyRing_size(const s_TSIGKeyRing* const self) {
 PyObject*
 TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
     s_TSIGKey* tsigkey;
-    
+
     if (PyArg_ParseTuple(args, "O!", &tsigkey_type, &tsigkey)) {
         try {
             const TSIGKeyRing::Result result =
@@ -374,11 +345,11 @@ TSIGKeyRing_add(const s_TSIGKeyRing* const self, PyObject* args) {
 
 PyObject*
 TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
-    s_Name* key_name;
+    PyObject* key_name;
 
     if (PyArg_ParseTuple(args, "O!", &name_type, &key_name)) {
         const TSIGKeyRing::Result result =
-            self->cppobj->remove(*key_name->cppobj);
+            self->cppobj->remove(PyName_ToName(key_name));
         return (Py_BuildValue("I", result));
     }
 
@@ -390,13 +361,14 @@ TSIGKeyRing_remove(const s_TSIGKeyRing* self, PyObject* args) {
 
 PyObject*
 TSIGKeyRing_find(const s_TSIGKeyRing* self, PyObject* args) {
-    s_Name* key_name;
-    s_Name* algorithm_name;
+    PyObject* key_name;
+    PyObject* algorithm_name;
 
     if (PyArg_ParseTuple(args, "O!O!", &name_type, &key_name,
                          &name_type, &algorithm_name)) {
         const TSIGKeyRing::FindResult result =
-            self->cppobj->find(*key_name->cppobj, *algorithm_name->cppobj);
+            self->cppobj->find(PyName_ToName(key_name),
+                               PyName_ToName(algorithm_name));
         if (result.key != NULL) {
             s_TSIGKey* key = PyObject_New(s_TSIGKey, &tsigkey_type);
             if (key == NULL) {
@@ -436,7 +408,7 @@ PyTypeObject tsigkeyring_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     NULL,                               // tp_str
     NULL,                               // tp_getattro
@@ -473,27 +445,24 @@ PyTypeObject tsigkeyring_type = {
 };
 
 bool
-initModulePart_TSIGKeyRing(PyObject* mod) {
-    if (PyType_Ready(&tsigkeyring_type) < 0) {
-        return (false);
+PyTSIGKeyRing_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    Py_INCREF(&tsigkeyring_type);
-    void* p = &tsigkeyring_type;
-    if (PyModule_AddObject(mod, "TSIGKeyRing",
-                           static_cast<PyObject*>(p)) != 0) {
-        Py_DECREF(&tsigkeyring_type);
-        return (false);
-    }
-
-    addClassVariable(tsigkeyring_type, "SUCCESS",
-                     Py_BuildValue("I", TSIGKeyRing::SUCCESS));
-    addClassVariable(tsigkeyring_type, "EXIST",
-                     Py_BuildValue("I", TSIGKeyRing::EXIST));
-    addClassVariable(tsigkeyring_type, "NOTFOUND",
-                     Py_BuildValue("I", TSIGKeyRing::NOTFOUND));
+    return (PyObject_TypeCheck(obj, &tsigkeyring_type));
+}
 
-    return (true);
+const TSIGKeyRing&
+PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj) {
+    if (tsigkeyring_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in TSIGKeyRing PyObject conversion");
+    }
+    const s_TSIGKeyRing* tsigkeyring =
+        static_cast<const s_TSIGKeyRing*>(tsigkeyring_obj);
+    return (*tsigkeyring->cppobj);
 }
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsigkey_python.h b/src/lib/dns/python/tsigkey_python.h
index 51b3ae7..6c3d2e3 100644
--- a/src/lib/dns/python/tsigkey_python.h
+++ b/src/lib/dns/python/tsigkey_python.h
@@ -24,24 +24,46 @@ class TSIGKeyRing;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGKey : public PyObject {
-public:
-    s_TSIGKey();
-    TSIGKey* cppobj;
-};
-
-class s_TSIGKeyRing : public PyObject {
-public:
-    s_TSIGKeyRing();
-    TSIGKeyRing* cppobj;
-};
-
 extern PyTypeObject tsigkey_type;
 extern PyTypeObject tsigkeyring_type;
 
-bool initModulePart_TSIGKey(PyObject* mod);
-bool initModulePart_TSIGKeyRing(PyObject* mod);
+/// \brief Checks if the given python object is a TSIGKey object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKey, false otherwise
+bool PyTSIGKey_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKey object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type TSIGKey; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyTSIGKey_Check()
+///
+/// \note This is not a copy; if the TSIGKey is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkey_obj The tsigkey object to convert
+const TSIGKey& PyTSIGKey_ToTSIGKey(const PyObject* tsigkey_obj);
+
+/// \brief Checks if the given python object is a TSIGKeyRing object
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGKeyRing, false otherwise
+bool PyTSIGKeyRing_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGKeyRing object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type TSIGKeyRing; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyTSIGKeyRing_Check()
+///
+/// \note This is not a copy; if the TSIGKeyRing is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigkeyring_obj The tsigkeyring object to convert
+const TSIGKeyRing& PyTSIGKeyRing_ToTSIGKeyRing(const PyObject* tsigkeyring_obj);
 
 } // namespace python
 } // namespace dns
diff --git a/src/lib/dns/python/tsigrecord_python.cc b/src/lib/dns/python/tsigrecord_python.cc
index 8a78b5e..c754dd2 100644
--- a/src/lib/dns/python/tsigrecord_python.cc
+++ b/src/lib/dns/python/tsigrecord_python.cc
@@ -32,10 +32,6 @@ using namespace isc::util::python;
 using namespace isc::dns;
 using namespace isc::dns::python;
 
-//
-// Definition of the classes
-//
-
 // For each class, we need a struct, helper functions (init, destroy,
 // and static wrappers around the methods we export), a list of methods,
 // and a type description
@@ -44,11 +40,14 @@ using namespace isc::dns::python;
 // TSIGRecord
 //
 
-// Trivial constructor.
-s_TSIGRecord::s_TSIGRecord() : cppobj(NULL) {
-}
-
 namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_TSIGRecord : public PyObject {
+public:
+    s_TSIGRecord() : cppobj(NULL) {};
+    TSIGRecord* cppobj;
+};
+
 // Shortcut type which would be convenient for adding class variables safely.
 typedef CPPPyObjectContainer<s_TSIGRecord, TSIGRecord> TSIGRecordContainer;
 
@@ -102,11 +101,12 @@ PyMethodDef TSIGRecord_methods[] = {
 int
 TSIGRecord_init(s_TSIGRecord* self, PyObject* args) {
     try {
-        const s_Name* py_name;
-        const s_TSIG* py_tsig;
+        const PyObject* py_name;
+        const PyObject* py_tsig;
         if (PyArg_ParseTuple(args, "O!O!", &name_type, &py_name,
                              &tsig_type, &py_tsig)) {
-            self->cppobj = new TSIGRecord(*py_name->cppobj, *py_tsig->cppobj);
+            self->cppobj = new TSIGRecord(PyName_ToName(py_name),
+                                          PyTSIG_ToTSIG(py_tsig));
             return (0);
         }
     } catch (const exception& ex) {
@@ -226,7 +226,7 @@ PyTypeObject tsigrecord_type = {
     NULL,                               // tp_as_number
     NULL,                               // tp_as_sequence
     NULL,                               // tp_as_mapping
-    NULL,                               // tp_hash 
+    NULL,                               // tp_hash
     NULL,                               // tp_call
     TSIGRecord_str,                       // tp_str
     NULL,                               // tp_getattro
@@ -262,50 +262,32 @@ PyTypeObject tsigrecord_type = {
     0                                   // tp_version_tag
 };
 
-// Module Initialization, all statics are initialized here
+PyObject*
+createTSIGRecordObject(const TSIGRecord& source) {
+    TSIGRecordContainer container(PyObject_New(s_TSIGRecord, &tsigrecord_type));
+    container.set(new TSIGRecord(source));
+    return (container.release());
+}
+
 bool
-initModulePart_TSIGRecord(PyObject* mod) {
-    // We initialize the static description object with PyType_Ready(),
-    // then add it to the module. This is not just a check! (leaving
-    // this out results in segmentation faults)
-    if (PyType_Ready(&tsigrecord_type) < 0) {
-        return (false);
-    }
-    void* p = &tsigrecord_type;
-    if (PyModule_AddObject(mod, "TSIGRecord", static_cast<PyObject*>(p)) < 0) {
-        return (false);
+PyTSIGRecord_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException, "obj argument NULL in typecheck");
     }
-    Py_INCREF(&tsigrecord_type);
+    return (PyObject_TypeCheck(obj, &tsigrecord_type));
+}
 
-    // The following template is the typical procedure for installing class
-    // variables.  If the class doesn't have a class variable, remove the
-    // entire try-catch clauses.
-    try {
-        // Constant class variables
-        installClassVariable(tsigrecord_type, "TSIG_TTL",
-                             Py_BuildValue("I", 0));
-    } catch (const exception& ex) {
-        const string ex_what =
-            "Unexpected failure in TSIGRecord initialization: " +
-            string(ex.what());
-        PyErr_SetString(po_IscException, ex_what.c_str());
-        return (false);
-    } catch (...) {
-        PyErr_SetString(PyExc_SystemError,
-                        "Unexpected failure in TSIGRecord initialization");
-        return (false);
+const TSIGRecord&
+PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj) {
+    if (tsigrecord_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in TSIGRecord PyObject conversion");
     }
-
-    return (true);
+    s_TSIGRecord* tsigrecord = static_cast<s_TSIGRecord*>(tsigrecord_obj);
+    return (*tsigrecord->cppobj);
 }
 
-PyObject*
-createTSIGRecordObject(const TSIGRecord& source) {
-    TSIGRecordContainer container = PyObject_New(s_TSIGRecord,
-                                                 &tsigrecord_type);
-    container.set(new TSIGRecord(source));
-    return (container.release());
-}
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/python/tsigrecord_python.h b/src/lib/dns/python/tsigrecord_python.h
index e0a3526..d6252e1 100644
--- a/src/lib/dns/python/tsigrecord_python.h
+++ b/src/lib/dns/python/tsigrecord_python.h
@@ -23,17 +23,9 @@ class TSIGRecord;
 
 namespace python {
 
-// The s_* Class simply covers one instantiation of the object
-class s_TSIGRecord : public PyObject {
-public:
-    s_TSIGRecord();
-    TSIGRecord* cppobj;
-};
 
 extern PyTypeObject tsigrecord_type;
 
-bool initModulePart_TSIGRecord(PyObject* mod);
-
 /// This is A simple shortcut to create a python TSIGRecord object (in the
 /// form of a pointer to PyObject) with minimal exception safety.
 /// On success, it returns a valid pointer to PyObject with a reference
@@ -43,6 +35,26 @@ bool initModulePart_TSIGRecord(PyObject* mod);
 /// followed by necessary setup for python exception.
 PyObject* createTSIGRecordObject(const TSIGRecord& source);
 
+/// \brief Checks if the given python object is a TSIGRecord object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type TSIGRecord, false otherwise
+bool PyTSIGRecord_Check(PyObject* obj);
+
+/// \brief Returns a reference to the TSIGRecord object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type TSIGRecord; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PyTSIGRecord_Check()
+///
+/// \note This is not a copy; if the TSIGRecord is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param tsigrecord_obj The tsigrecord object to convert
+const TSIGRecord& PyTSIGRecord_ToTSIGRecord(PyObject* tsigrecord_obj);
+
 } // namespace python
 } // namespace dns
 } // namespace isc
diff --git a/src/lib/dns/rdata/any_255/tsig_250.cc b/src/lib/dns/rdata/any_255/tsig_250.cc
index 2557965..4eb72bc 100644
--- a/src/lib/dns/rdata/any_255/tsig_250.cc
+++ b/src/lib/dns/rdata/any_255/tsig_250.cc
@@ -19,9 +19,11 @@
 #include <boost/lexical_cast.hpp>
 
 #include <util/buffer.h>
+#include <util/strutil.h>
 #include <util/encode/base64.h>
 
 #include <dns/messagerenderer.h>
+#include <dns/name.h>
 #include <dns/rdata.h>
 #include <dns/rdataclass.h>
 #include <dns/tsigerror.h>
@@ -30,6 +32,7 @@ using namespace std;
 using namespace boost;
 using namespace isc::util;
 using namespace isc::util::encode;
+using namespace isc::util::str;
 
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
@@ -65,45 +68,6 @@ struct TSIG::TSIGImpl {
     const vector<uint8_t> other_data_;
 };
 
-namespace {
-string
-getToken(istringstream& iss, const string& full_input) {
-    string token;
-    iss >> token;
-    if (iss.bad() || iss.fail()) {
-        isc_throw(InvalidRdataText, "Invalid TSIG text: parse error " <<
-                  full_input);
-    }
-    return (token);
-}
-
-// This helper function converts a string token to an *unsigned* integer.
-// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
-// wide to store resulting integers.
-// BitSize is the maximum number of bits that the resulting integer can take.
-// This function first checks whether the given token can be converted to
-// an integer of NumType type.  It then confirms the conversion result is
-// within the valid range, i.e., [0, 2^NumType - 1].  The second check is
-// necessary because lexical_cast<T> where T is an unsigned integer type
-// doesn't correctly reject negative numbers when compiled with SunStudio.
-template <typename NumType, int BitSize>
-NumType
-tokenToNum(const string& num_token) {
-    NumType num;
-    try {
-        num = lexical_cast<NumType>(num_token);
-    } catch (const boost::bad_lexical_cast& ex) {
-        isc_throw(InvalidRdataText, "Invalid TSIG numeric parameter: " <<
-                  num_token);
-    }
-    if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
-        isc_throw(InvalidRdataText, "Numeric TSIG parameter out of range: " <<
-                  num);
-    }
-    return (num);
-}
-}
-
 /// \brief Constructor from string.
 ///
 /// \c tsig_str must be formatted as follows:
@@ -148,47 +112,52 @@ tokenToNum(const string& num_token) {
 TSIG::TSIG(const std::string& tsig_str) : impl_(NULL) {
     istringstream iss(tsig_str);
 
-    const Name algorithm(getToken(iss, tsig_str));
-    const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss,
-                                                                 tsig_str));
-    const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-    const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
-    const string mac_txt = (macsize > 0) ? getToken(iss, tsig_str) : "";
-    vector<uint8_t> mac;
-    decodeBase64(mac_txt, mac);
-    if (mac.size() != macsize) {
-        isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
-    }
-
-    const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-
-    const string error_txt = getToken(iss, tsig_str);
-    int32_t error = 0;
-    // XXX: In the initial implementation we hardcode the mnemonics.
-    // We'll soon generalize this.
-    if (error_txt == "BADSIG") {
-        error = 16;
-    } else if (error_txt == "BADKEY") {
-        error = 17;
-    } else if (error_txt == "BADTIME") {
-        error = 18;
-    } else {
-        error = tokenToNum<int32_t, 16>(error_txt);
-    }
-
-    const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss, tsig_str));
-    const string otherdata_txt = (otherlen > 0) ? getToken(iss, tsig_str) : "";
-    vector<uint8_t> other_data;
-    decodeBase64(otherdata_txt, other_data);
-
-    if (!iss.eof()) {
-        isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
-                  tsig_str);
+    try {
+        const Name algorithm(getToken(iss));
+        const int64_t time_signed = tokenToNum<int64_t, 48>(getToken(iss));
+        const int32_t fudge = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t macsize = tokenToNum<int32_t, 16>(getToken(iss));
+
+        const string mac_txt = (macsize > 0) ? getToken(iss) : "";
+        vector<uint8_t> mac;
+        decodeBase64(mac_txt, mac);
+        if (mac.size() != macsize) {
+            isc_throw(InvalidRdataText, "TSIG MAC size and data are inconsistent");
+        }
+
+        const int32_t orig_id = tokenToNum<int32_t, 16>(getToken(iss));
+
+        const string error_txt = getToken(iss);
+        int32_t error = 0;
+        // XXX: In the initial implementation we hardcode the mnemonics.
+        // We'll soon generalize this.
+        if (error_txt == "BADSIG") {
+            error = 16;
+        } else if (error_txt == "BADKEY") {
+            error = 17;
+        } else if (error_txt == "BADTIME") {
+            error = 18;
+        } else {
+            error = tokenToNum<int32_t, 16>(error_txt);
+        }
+
+        const int32_t otherlen = tokenToNum<int32_t, 16>(getToken(iss));
+        const string otherdata_txt = (otherlen > 0) ? getToken(iss) : "";
+        vector<uint8_t> other_data;
+        decodeBase64(otherdata_txt, other_data);
+
+        if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for TSIG RDATA: " <<
+                    tsig_str);
+        }
+
+        impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
+                            error, other_data);
+
+    } catch (const StringTokenError& ste) {
+        isc_throw(InvalidRdataText, "Invalid TSIG text: " << ste.what() <<
+                  ": " << tsig_str);
     }
-
-    impl_ = new TSIGImpl(algorithm, time_signed, fudge, mac, orig_id,
-                         error, other_data);
 }
 
 /// \brief Constructor from wire-format data.
diff --git a/src/lib/dns/rdata/generic/afsdb_18.cc b/src/lib/dns/rdata/generic/afsdb_18.cc
new file mode 100644
index 0000000..6afc4de
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <boost/lexical_cast.hpp>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c afsdb_str must be formatted as follows:
+/// \code <subtype> <server name>
+/// \endcode
+/// where server name field must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "1 server.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string is invalid.
+AFSDB::AFSDB(const std::string& afsdb_str) :
+    subtype_(0), server_(Name::ROOT_NAME())
+{
+    istringstream iss(afsdb_str);
+
+    try {
+        const uint32_t subtype = tokenToNum<int32_t, 16>(getToken(iss));
+        const Name servername(getToken(iss));
+        string server;
+
+        if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for AFSDB"
+                    "RDATA: " << afsdb_str);
+        }
+
+        subtype_ = subtype;
+        server_ = servername;
+
+    } catch (const StringTokenError& ste) {
+        isc_throw(InvalidRdataText, "Invalid AFSDB text: " <<
+                  ste.what() << ": " << afsdb_str);
+    }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire is invalid.
+AFSDB::AFSDB(InputBuffer& buffer, size_t) :
+    subtype_(buffer.readUint16()), server_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+AFSDB::AFSDB(const AFSDB& other) :
+    Rdata(), subtype_(other.subtype_), server_(other.server_)
+{}
+
+AFSDB&
+AFSDB::operator=(const AFSDB& source) {
+    subtype_ = source.subtype_;
+    server_ = source.server_;
+
+    return (*this);
+}
+
+/// \brief Convert the \c AFSDB to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c AFSDB(const std::string&))).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c AFSDB object.
+string
+AFSDB::toText() const {
+    return (boost::lexical_cast<string>(subtype_) + " " + server_.toText());
+}
+
+/// \brief Render the \c AFSDB in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+AFSDB::toWire(OutputBuffer& buffer) const {
+    buffer.writeUint16(subtype_);
+    server_.toWire(buffer);
+}
+
+/// \brief Render the \c AFSDB in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE AFSDB is not "well-known", the server
+/// field (domain name) will not be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+AFSDB::toWire(AbstractMessageRenderer& renderer) const {
+    renderer.writeUint16(subtype_);
+    renderer.writeName(server_, false);
+}
+
+/// \brief Compare two instances of \c AFSDB RDATA.
+///
+/// See documentation in \c Rdata.
+int
+AFSDB::compare(const Rdata& other) const {
+    const AFSDB& other_afsdb = dynamic_cast<const AFSDB&>(other);
+    if (subtype_ < other_afsdb.subtype_) {
+        return (-1);
+    } else if (subtype_ > other_afsdb.subtype_) {
+        return (1);
+    }
+
+    return (compareNames(server_, other_afsdb.server_));
+}
+
+const Name&
+AFSDB::getServer() const {
+    return (server_);
+}
+
+uint16_t
+AFSDB::getSubtype() const {
+    return (subtype_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/afsdb_18.h b/src/lib/dns/rdata/generic/afsdb_18.h
new file mode 100644
index 0000000..4a46775
--- /dev/null
+++ b/src/lib/dns/rdata/generic/afsdb_18.h
@@ -0,0 +1,74 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::AFSDB class represents the AFSDB RDATA as defined %in
+/// RFC1183.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// AFSDB RDATA.
+class AFSDB : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Assignment operator.
+    ///
+    /// This method never throws an exception.
+    AFSDB& operator=(const AFSDB& source);
+    ///
+    /// Specialized methods
+    ///
+
+    /// \brief Return the value of the server field.
+    ///
+    /// \return A reference to a \c Name class object corresponding to the
+    /// internal server name.
+    ///
+    /// This method never throws an exception.
+    const Name& getServer() const;
+
+    /// \brief Return the value of the subtype field.
+    ///
+    /// This method never throws an exception.
+    uint16_t getSubtype() const;
+
+private:
+    uint16_t subtype_;
+    Name server_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/detail/ds_like.h b/src/lib/dns/rdata/generic/detail/ds_like.h
new file mode 100644
index 0000000..b5a35cd
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/ds_like.h
@@ -0,0 +1,225 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __DS_LIKE_H
+#define __DS_LIKE_H 1
+
+#include <stdint.h>
+
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+namespace isc {
+namespace dns {
+namespace rdata {
+namespace generic {
+namespace detail {
+
+/// \brief \c rdata::DSLikeImpl class represents the DS-like RDATA for DS
+/// and DLV types.
+///
+/// This class implements the basic interfaces inherited by the DS and DLV
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to DS-like RDATA.
+template <class Type, uint16_t typeCode> class DSLikeImpl {
+    // Common sequence of toWire() operations used for the two versions of
+    // toWire().
+    template <typename Output>
+    void
+    toWireCommon(Output& output) const {
+        output.writeUint16(tag_);
+        output.writeUint8(algorithm_);
+        output.writeUint8(digest_type_);
+        output.writeData(&digest_[0], digest_.size());
+    }
+
+public:
+    /// \brief Constructor from string.
+    ///
+    /// <b>Exceptions</b>
+    ///
+    /// \c InvalidRdataText is thrown if the method cannot process the
+    /// parameter data for any of a number of reasons.
+    DSLikeImpl(const std::string& ds_str) {
+        std::istringstream iss(ds_str);
+        // peekc should be of iss's char_type for isspace to work
+        std::istringstream::char_type peekc;
+        std::stringbuf digestbuf;
+        uint32_t tag, algorithm, digest_type;
+
+        iss >> tag >> algorithm >> digest_type;
+        if (iss.bad() || iss.fail()) {
+            isc_throw(InvalidRdataText,
+                      "Invalid " << RRType(typeCode) << " text");
+        }
+        if (tag > 0xffff) {
+            isc_throw(InvalidRdataText,
+                      RRType(typeCode) << " tag out of range");
+        }
+        if (algorithm > 0xff) {
+            isc_throw(InvalidRdataText,
+                      RRType(typeCode) << " algorithm out of range");
+        }
+        if (digest_type > 0xff) {
+            isc_throw(InvalidRdataText,
+                      RRType(typeCode) << " digest type out of range");
+        }
+
+        iss.read(&peekc, 1);
+        if (!iss.good() || !isspace(peekc, iss.getloc())) {
+            isc_throw(InvalidRdataText,
+                      RRType(typeCode) << " presentation format error");
+        }
+
+        iss >> &digestbuf;
+
+        tag_ = tag;
+        algorithm_ = algorithm;
+        digest_type_ = digest_type;
+        decodeHex(digestbuf.str(), digest_);
+    }
+
+    /// \brief Constructor from wire-format data.
+    ///
+    /// \param buffer A buffer storing the wire format data.
+    /// \param rdata_len The length of the RDATA in bytes, normally expected
+    /// to be the value of the RDLENGTH field of the corresponding RR.
+    ///
+    /// <b>Exceptions</b>
+    ///
+    /// \c InvalidRdataLength is thrown if the input data is too short for the
+    /// type.
+    DSLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+        if (rdata_len < 4) {
+            isc_throw(InvalidRdataLength, RRType(typeCode) << " too short");
+        }
+
+        tag_ = buffer.readUint16();
+        algorithm_ = buffer.readUint8();
+        digest_type_ = buffer.readUint8();
+
+        rdata_len -= 4;
+        digest_.resize(rdata_len);
+        buffer.readData(&digest_[0], rdata_len);
+    }
+
+    /// \brief The copy constructor.
+    ///
+    /// Trivial for now, we could've used the default one.
+    DSLikeImpl(const DSLikeImpl& source) {
+        digest_ = source.digest_;
+        tag_ = source.tag_;
+        algorithm_ = source.algorithm_;
+        digest_type_ = source.digest_type_;
+    }
+
+    /// \brief Convert the DS-like data to a string.
+    ///
+    /// \return A \c string object that represents the DS-like data.
+    std::string
+    toText() const {
+        using namespace boost;
+        return (lexical_cast<string>(static_cast<int>(tag_)) +
+            " " + lexical_cast<string>(static_cast<int>(algorithm_)) +
+            " " + lexical_cast<string>(static_cast<int>(digest_type_)) +
+            " " + encodeHex(digest_));
+    }
+
+    /// \brief Render the DS-like data in the wire format to an OutputBuffer
+    /// object.
+    ///
+    /// \param buffer An output buffer to store the wire data.
+    void
+    toWire(OutputBuffer& buffer) const {
+        toWireCommon(buffer);
+    }
+
+    /// \brief Render the DS-like data in the wire format to an
+    /// AbstractMessageRenderer object.
+    ///
+    /// \param renderer A renderer object to send the wire data to.
+    void
+    toWire(AbstractMessageRenderer& renderer) const {
+        toWireCommon(renderer);
+    }
+
+    /// \brief Compare two instances of DS-like RDATA.
+    ///
+    /// It is up to the caller to make sure that \c other is an object of the
+    /// same \c DSLikeImpl class.
+    ///
+    /// \param other the right-hand operand to compare against.
+    /// \return < 0 if \c this would be sorted before \c other.
+    /// \return 0 if \c this is identical to \c other in terms of sorting
+    /// order.
+    /// \return > 0 if \c this would be sorted after \c other.
+    int
+    compare(const DSLikeImpl& other_ds) const {
+        if (tag_ != other_ds.tag_) {
+            return (tag_ < other_ds.tag_ ? -1 : 1);
+        }
+        if (algorithm_ != other_ds.algorithm_) {
+            return (algorithm_ < other_ds.algorithm_ ? -1 : 1);
+        }
+        if (digest_type_ != other_ds.digest_type_) {
+            return (digest_type_ < other_ds.digest_type_ ? -1 : 1);
+        }
+
+        size_t this_len = digest_.size();
+        size_t other_len = other_ds.digest_.size();
+        size_t cmplen = min(this_len, other_len);
+        int cmp = memcmp(&digest_[0], &other_ds.digest_[0], cmplen);
+        if (cmp != 0) {
+            return (cmp);
+        } else {
+            return ((this_len == other_len)
+                    ? 0 : (this_len < other_len) ? -1 : 1);
+        }
+    }
+
+    /// \brief Accessors
+    uint16_t
+    getTag() const {
+        return (tag_);
+    }
+
+private:
+    // straightforward representation of DS RDATA fields
+    uint16_t tag_;
+    uint8_t algorithm_;
+    uint8_t digest_type_;
+    std::vector<uint8_t> digest_;
+};
+
+}
+}
+}
+}
+}
+#endif //  __DS_LIKE_H
+
+// Local Variables: 
+// mode: c++
+// End: 
diff --git a/src/lib/dns/rdata/generic/detail/txt_like.h b/src/lib/dns/rdata/generic/detail/txt_like.h
new file mode 100644
index 0000000..a0ab7ac
--- /dev/null
+++ b/src/lib/dns/rdata/generic/detail/txt_like.h
@@ -0,0 +1,224 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __TXT_LIKE_H
+#define __TXT_LIKE_H 1
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+using namespace std;
+using namespace isc::util;
+
+/// \brief \c rdata::TXTLikeImpl class represents the TXT-like RDATA for TXT
+/// and SPF types.
+///
+/// This class implements the basic interfaces inherited by the TXT and SPF
+/// classes from the abstract \c rdata::Rdata class, and provides trivial
+/// accessors to TXT-like RDATA.
+template<class Type, uint16_t typeCode>class TXTLikeImpl {
+public:
+    /// \brief Constructor from wire-format data.
+    ///
+    /// \param buffer A buffer storing the wire format data.
+    /// \param rdata_len The length of the RDATA in bytes, normally expected
+    /// to be the value of the RDLENGTH field of the corresponding RR.
+    ///
+    /// <b>Exceptions</b>
+    ///
+    /// \c InvalidRdataLength is thrown if rdata_len exceeds the maximum.
+    /// \c DNSMessageFORMERR is thrown if the RR is misformed.
+    TXTLikeImpl(InputBuffer& buffer, size_t rdata_len) {
+        if (rdata_len > MAX_RDLENGTH) {
+            isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
+        }
+
+        if (rdata_len == 0) {    // note that this couldn't happen in the loop.
+            isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+                      RRType(typeCode) << " RDATA: 0-length character string");
+        }
+
+        do {
+            const uint8_t len = buffer.readUint8();
+            if (rdata_len < len + 1) {
+                isc_throw(DNSMessageFORMERR, "Error in parsing " <<
+                          RRType(typeCode) <<
+                          " RDATA: character string length is too large: " <<
+                          static_cast<int>(len));
+            }
+            vector<uint8_t> data(len + 1);
+            data[0] = len;
+            buffer.readData(&data[0] + 1, len);
+            string_list_.push_back(data);
+
+            rdata_len -= (len + 1);
+        } while (rdata_len > 0);
+    }
+
+    /// \brief Constructor from string.
+    ///
+    /// <b>Exceptions</b>
+    ///
+    /// \c CharStringTooLong is thrown if the parameter string length exceeds
+    /// maximum.
+    /// \c InvalidRdataText is thrown if the method cannot process the
+    /// parameter data.
+    explicit TXTLikeImpl(const std::string& txtstr) {
+        // TBD: this is a simple, incomplete implementation that only supports
+        // a single character-string.
+
+        size_t length = txtstr.size();
+        size_t pos_begin = 0;
+
+        if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
+            pos_begin = 1;
+            length -= 2;
+        }
+
+        if (length > MAX_CHARSTRING_LEN) {
+            isc_throw(CharStringTooLong, RRType(typeCode) <<
+                      " RDATA construction from text:"
+                      " string length is too long: " << length);
+        }
+
+        // TBD: right now, we don't support escaped characters
+        if (txtstr.find('\\') != string::npos) {
+            isc_throw(InvalidRdataText, RRType(typeCode) <<
+                      " RDATA from text:"
+                      " escaped character is currently not supported: " <<
+                      txtstr);
+        }
+
+        vector<uint8_t> data;
+        data.reserve(length + 1);
+        data.push_back(length);
+        data.insert(data.end(), txtstr.begin() + pos_begin,
+                    txtstr.begin() + pos_begin + length);
+        string_list_.push_back(data);
+    }
+
+    /// \brief The copy constructor.
+    ///
+    /// Trivial for now, we could've used the default one.
+    TXTLikeImpl(const TXTLikeImpl& other) :
+        string_list_(other.string_list_)
+    {}
+
+    /// \brief Render the TXT-like data in the wire format to an OutputBuffer
+    /// object.
+    ///
+    /// \param buffer An output buffer to store the wire data.
+    void
+    toWire(OutputBuffer& buffer) const {
+        for (vector<vector<uint8_t> >::const_iterator it =
+                                                          string_list_.begin();
+             it != string_list_.end();
+             ++it)
+        {
+            buffer.writeData(&(*it)[0], (*it).size());
+        }
+    }
+
+    /// \brief Render the TXT-like data in the wire format to an
+    /// AbstractMessageRenderer object.
+    ///
+    /// \param renderer An output AbstractMessageRenderer to send the wire data
+    /// to.
+    void
+    toWire(AbstractMessageRenderer& renderer) const {
+        for (vector<vector<uint8_t> >::const_iterator it =
+                                                          string_list_.begin();
+             it != string_list_.end();
+             ++it)
+        {
+            renderer.writeData(&(*it)[0], (*it).size());
+        }
+    }
+
+    /// \brief Convert the TXT-like data to a string.
+    ///
+    /// \return A \c string object that represents the TXT-like data.
+    string
+    toText() const {
+        string s;
+
+        // XXX: this implementation is not entirely correct.  for example, it
+        // should escape double-quotes if they appear in the character string.
+        for (vector<vector<uint8_t> >::const_iterator it =
+                                                          string_list_.begin();
+             it != string_list_.end();
+             ++it)
+        {
+            if (!s.empty()) {
+                s.push_back(' ');
+            }
+            s.push_back('"');
+            s.insert(s.end(), (*it).begin() + 1, (*it).end());
+            s.push_back('"');
+        }
+
+        return (s);
+    }
+
+    /// \brief Compare two instances of TXT-like RDATA.
+    ///
+    /// It is up to the caller to make sure that \c other is an object of the
+    /// same \c TXTLikeImpl class.
+    ///
+    /// \param other the right-hand operand to compare against.
+    /// \return < 0 if \c this would be sorted before \c other.
+    /// \return 0 if \c this is identical to \c other in terms of sorting
+    /// order.
+    /// \return > 0 if \c this would be sorted after \c other.
+    int
+    compare(const TXTLikeImpl& other) const {
+        // This implementation is not efficient.  Revisit this (TBD).
+        OutputBuffer this_buffer(0);
+        toWire(this_buffer);
+        uint8_t const* const this_data = (uint8_t const*)this_buffer.getData();
+        size_t this_len = this_buffer.getLength();
+
+        OutputBuffer other_buffer(0);
+        other.toWire(other_buffer);
+        uint8_t const* const other_data
+                                      = (uint8_t const*)other_buffer.getData();
+        const size_t other_len = other_buffer.getLength();
+
+        const size_t cmplen = min(this_len, other_len);
+        const int cmp = memcmp(this_data, other_data, cmplen);
+
+        if (cmp != 0) {
+            return (cmp);
+        } else {
+            return ((this_len == other_len) ? 0 :
+                    (this_len < other_len) ? -1 : 1);
+        }
+    }
+
+private:
+    /// Note: this is a prototype version; we may reconsider
+    /// this representation later.
+    std::vector<std::vector<uint8_t> > string_list_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+
+#endif //  __TXT_LIKE_H
+
+// Local Variables: 
+// mode: c++
+// End: 
diff --git a/src/lib/dns/rdata/generic/dlv_32769.cc b/src/lib/dns/rdata/generic/dlv_32769.cc
new file mode 100644
index 0000000..9887aa8
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+
+#include <util/buffer.h>
+#include <util/encode/hex.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+#include <dns/rdata/generic/detail/ds_like.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const string& ds_str) :
+    impl_(new DLVImpl(ds_str))
+{}
+
+/// \brief Constructor from wire-format data.
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(InputBuffer& buffer, size_t rdata_len) :
+    impl_(new DLVImpl(buffer, rdata_len))
+{}
+
+/// \brief Copy constructor
+///
+/// A copy of the implementation object is allocated and constructed.
+DLV::DLV(const DLV& source) :
+    Rdata(), impl_(new DLVImpl(*source.impl_))
+{}
+
+/// \brief Assignment operator
+///
+/// PIMPL-induced logic
+DLV&
+DLV::operator=(const DLV& source) {
+    if (impl_ == source.impl_) {
+        return (*this);
+    }
+
+    DLVImpl* newimpl = new DLVImpl(*source.impl_);
+    delete impl_;
+    impl_ = newimpl;
+
+    return (*this);
+}
+
+/// \brief Destructor
+///
+/// Deallocates an internal resource.
+DLV::~DLV() {
+    delete impl_;
+}
+
+/// \brief Convert the \c DLV to a string.
+///
+/// A pass-thru to the corresponding implementation method.
+string
+DLV::toText() const {
+    return (impl_->toText());
+}
+
+/// \brief Render the \c DLV in the wire format to a OutputBuffer object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(OutputBuffer& buffer) const {
+    impl_->toWire(buffer);
+}
+
+/// \brief Render the \c DLV in the wire format to a AbstractMessageRenderer
+/// object
+///
+/// A pass-thru to the corresponding implementation method.
+void
+DLV::toWire(AbstractMessageRenderer& renderer) const {
+    impl_->toWire(renderer);
+}
+
+/// \brief Compare two instances of \c DLV RDATA.
+///
+/// The type check is performed here. Otherwise, a pass-thru to the
+/// corresponding implementation method.
+int
+DLV::compare(const Rdata& other) const {
+    const DLV& other_ds = dynamic_cast<const DLV&>(other);
+
+    return (impl_->compare(*other_ds.impl_));
+}
+
+/// \brief Tag accessor
+uint16_t
+DLV::getTag() const {
+    return (impl_->getTag());
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/dlv_32769.h b/src/lib/dns/rdata/generic/dlv_32769.h
new file mode 100644
index 0000000..86cd98c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/dlv_32769.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
+
+/// \brief \c rdata::generic::DLV class represents the DLV RDATA as defined in
+/// RFC4431.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DLV RDATA.
+class DLV : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Assignment operator.
+    ///
+    /// It internally allocates a resource, and if it fails a corresponding
+    /// standard exception will be thrown.
+    /// This operator never throws an exception otherwise.
+    ///
+    /// This operator provides the strong exception guarantee: When an
+    /// exception is thrown the content of the assignment target will be
+    /// intact.
+    DLV& operator=(const DLV& source);
+
+    /// \brief The destructor.
+    ~DLV();
+
+    /// \brief Return the value of the Tag field.
+    ///
+    /// This method never throws an exception.
+    uint16_t getTag() const;
+private:
+    typedef detail::DSLikeImpl<DLV, 32769> DLVImpl;
+    DLVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables: 
+// mode: c++
+// End: 
diff --git a/src/lib/dns/rdata/generic/ds_43.cc b/src/lib/dns/rdata/generic/ds_43.cc
index 1b48456..20b62dc 100644
--- a/src/lib/dns/rdata/generic/ds_43.cc
+++ b/src/lib/dns/rdata/generic/ds_43.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -12,87 +12,32 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include <iostream>
 #include <string>
-#include <sstream>
-#include <vector>
-
-#include <boost/lexical_cast.hpp>
 
 #include <util/buffer.h>
 #include <util/encode/hex.h>
 
 #include <dns/messagerenderer.h>
-#include <dns/name.h>
 #include <dns/rdata.h>
 #include <dns/rdataclass.h>
 
-#include <stdio.h>
-#include <time.h>
+#include <dns/rdata/generic/detail/ds_like.h>
 
 using namespace std;
 using namespace isc::util;
 using namespace isc::util::encode;
+using namespace isc::dns::rdata::generic::detail;
 
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
 
-struct DSImpl {
-    // straightforward representation of DS RDATA fields
-    DSImpl(uint16_t tag, uint8_t algorithm, uint8_t digest_type,
-           const vector<uint8_t>& digest) :
-        tag_(tag), algorithm_(algorithm), digest_type_(digest_type),
-        digest_(digest)
-    {}
-
-    uint16_t tag_;
-    uint8_t algorithm_;
-    uint8_t digest_type_;
-    const vector<uint8_t> digest_;
-};
-
 DS::DS(const string& ds_str) :
-    impl_(NULL)
-{
-    istringstream iss(ds_str);
-    unsigned int tag, algorithm, digest_type;
-    stringbuf digestbuf;
-
-    iss >> tag >> algorithm >> digest_type >> &digestbuf;
-    if (iss.bad() || iss.fail()) {
-        isc_throw(InvalidRdataText, "Invalid DS text");
-    }
-    if (tag > 0xffff) {
-        isc_throw(InvalidRdataText, "DS tag out of range");
-    }
-    if (algorithm > 0xff) {
-        isc_throw(InvalidRdataText, "DS algorithm out of range");
-    }
-    if (digest_type > 0xff) {
-        isc_throw(InvalidRdataText, "DS digest type out of range");
-    }
-
-    vector<uint8_t> digest;
-    decodeHex(digestbuf.str(), digest);
-
-    impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
-
-DS::DS(InputBuffer& buffer, size_t rdata_len) {
-    if (rdata_len < 4) {
-        isc_throw(InvalidRdataLength, "DS too short");
-    }
-
-    uint16_t tag = buffer.readUint16();
-    uint16_t algorithm = buffer.readUint8();
-    uint16_t digest_type = buffer.readUint8();
-
-    rdata_len -= 4;
-    vector<uint8_t> digest(rdata_len);
-    buffer.readData(&digest[0], rdata_len);
+    impl_(new DSImpl(ds_str))
+{}
 
-    impl_ = new DSImpl(tag, algorithm, digest_type, digest);
-}
+DS::DS(InputBuffer& buffer, size_t rdata_len) :
+    impl_(new DSImpl(buffer, rdata_len))
+{}
 
 DS::DS(const DS& source) :
     Rdata(), impl_(new DSImpl(*source.impl_))
@@ -117,57 +62,29 @@ DS::~DS() {
 
 string
 DS::toText() const {
-    using namespace boost;
-    return (lexical_cast<string>(static_cast<int>(impl_->tag_)) +
-        " " + lexical_cast<string>(static_cast<int>(impl_->algorithm_)) +
-        " " + lexical_cast<string>(static_cast<int>(impl_->digest_type_)) +
-        " " + encodeHex(impl_->digest_));
+    return (impl_->toText());
 }
 
 void
 DS::toWire(OutputBuffer& buffer) const {
-    buffer.writeUint16(impl_->tag_);
-    buffer.writeUint8(impl_->algorithm_);
-    buffer.writeUint8(impl_->digest_type_);
-    buffer.writeData(&impl_->digest_[0], impl_->digest_.size());
+    impl_->toWire(buffer);
 }
 
 void
 DS::toWire(AbstractMessageRenderer& renderer) const {
-    renderer.writeUint16(impl_->tag_);
-    renderer.writeUint8(impl_->algorithm_);
-    renderer.writeUint8(impl_->digest_type_);
-    renderer.writeData(&impl_->digest_[0], impl_->digest_.size());
+    impl_->toWire(renderer);
 }
 
 int
 DS::compare(const Rdata& other) const {
     const DS& other_ds = dynamic_cast<const DS&>(other);
 
-    if (impl_->tag_ != other_ds.impl_->tag_) {
-        return (impl_->tag_ < other_ds.impl_->tag_ ? -1 : 1);
-    }
-    if (impl_->algorithm_ != other_ds.impl_->algorithm_) {
-        return (impl_->algorithm_ < other_ds.impl_->algorithm_ ? -1 : 1);
-    }
-    if (impl_->digest_type_ != other_ds.impl_->digest_type_) {
-        return (impl_->digest_type_ < other_ds.impl_->digest_type_ ? -1 : 1);
-    }
-
-    size_t this_len = impl_->digest_.size();
-    size_t other_len = other_ds.impl_->digest_.size();
-    size_t cmplen = min(this_len, other_len);
-    int cmp = memcmp(&impl_->digest_[0], &other_ds.impl_->digest_[0], cmplen);
-    if (cmp != 0) {
-        return (cmp);
-    } else {
-        return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
-    }
+    return (impl_->compare(*other_ds.impl_));
 }
 
 uint16_t
 DS::getTag() const {
-    return (impl_->tag_);
+    return (impl_->getTag());
 }
 
 // END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/ds_43.h b/src/lib/dns/rdata/generic/ds_43.h
index 03b19a0..2697f51 100644
--- a/src/lib/dns/rdata/generic/ds_43.h
+++ b/src/lib/dns/rdata/generic/ds_43.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -12,6 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+// BEGIN_HEADER_GUARD
+
 #include <stdint.h>
 
 #include <string>
@@ -21,8 +23,6 @@
 #include <dns/rrttl.h>
 #include <dns/rdata.h>
 
-// BEGIN_HEADER_GUARD
-
 // BEGIN_ISC_NAMESPACE
 
 // BEGIN_COMMON_DECLARATIONS
@@ -30,20 +30,41 @@
 
 // BEGIN_RDATA_NAMESPACE
 
-struct DSImpl;
+namespace detail {
+template <class Type, uint16_t typeCode> class DSLikeImpl;
+}
 
+/// \brief \c rdata::generic::DS class represents the DS RDATA as defined in
+/// RFC3658.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DS RDATA.
 class DS : public Rdata {
 public:
     // BEGIN_COMMON_MEMBERS
     // END_COMMON_MEMBERS
+
+    /// \brief Assignment operator.
+    ///
+    /// It internally allocates a resource, and if it fails a corresponding
+    /// standard exception will be thrown.
+    /// This operator never throws an exception otherwise.
+    ///
+    /// This operator provides the strong exception guarantee: When an
+    /// exception is thrown the content of the assignment target will be
+    /// intact.
     DS& operator=(const DS& source);
+
+    /// \brief The destructor.
     ~DS();
 
+    /// \brief Return the value of the Tag field.
     ///
-    /// Specialized methods
-    ///
+    /// This method never throws an exception.
     uint16_t getTag() const;
 private:
+    typedef detail::DSLikeImpl<DS, 43> DSImpl;
     DSImpl* impl_;
 };
 
diff --git a/src/lib/dns/rdata/generic/hinfo_13.cc b/src/lib/dns/rdata/generic/hinfo_13.cc
new file mode 100644
index 0000000..45f4209
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.cc
@@ -0,0 +1,129 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/character_string.h>
+#include <util/strutil.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+
+HINFO::HINFO(const string& hinfo_str) {
+    string::const_iterator input_iterator = hinfo_str.begin();
+    cpu_ = getNextCharacterString(hinfo_str, input_iterator);
+
+    skipLeftSpaces(hinfo_str, input_iterator);
+
+    os_ = getNextCharacterString(hinfo_str, input_iterator);
+}
+
+HINFO::HINFO(InputBuffer& buffer, size_t rdata_len) {
+    cpu_ = getNextCharacterString(buffer, rdata_len);
+    os_ = getNextCharacterString(buffer, rdata_len);
+}
+
+HINFO::HINFO(const HINFO& source):
+    Rdata(), cpu_(source.cpu_), os_(source.os_)
+{
+}
+
+std::string
+HINFO::toText() const {
+    string result;
+    result += "\"";
+    result += cpu_;
+    result += "\" \"";
+    result += os_;
+    result += "\"";
+    return (result);
+}
+
+void
+HINFO::toWire(OutputBuffer& buffer) const {
+    toWireHelper(buffer);
+}
+
+void
+HINFO::toWire(AbstractMessageRenderer& renderer) const {
+    toWireHelper(renderer);
+}
+
+int
+HINFO::compare(const Rdata& other) const {
+    const HINFO& other_hinfo = dynamic_cast<const HINFO&>(other);
+
+    if (cpu_ < other_hinfo.cpu_) {
+        return (-1);
+    } else if (cpu_ > other_hinfo.cpu_) {
+        return (1);
+    }
+
+    if (os_ < other_hinfo.os_) {
+        return (-1);
+    } else if (os_ > other_hinfo.os_) {
+        return (1);
+    }
+
+    return (0);
+}
+
+const std::string&
+HINFO::getCPU() const {
+    return (cpu_);
+}
+
+const std::string&
+HINFO::getOS() const {
+    return (os_);
+}
+
+void
+HINFO::skipLeftSpaces(const std::string& input_str,
+                      std::string::const_iterator& input_iterator)
+{
+    if (input_iterator >= input_str.end()) {
+        isc_throw(InvalidRdataText,
+                  "Invalid HINFO text format, field is missing.");
+    }
+
+    if (!isspace(*input_iterator)) {
+        isc_throw(InvalidRdataText,
+            "Invalid HINFO text format, fields are not separated by space.");
+    }
+    // Skip white spaces
+    while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+        ++input_iterator;
+    }
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/hinfo_13.h b/src/lib/dns/rdata/generic/hinfo_13.h
new file mode 100644
index 0000000..8513419
--- /dev/null
+++ b/src/lib/dns/rdata/generic/hinfo_13.h
@@ -0,0 +1,77 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+#include <stdint.h>
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c HINFO class represents the HINFO rdata defined in
+/// RFC1034, RFC1035
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// HINFO rdata.
+class HINFO : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    // HINFO specific methods
+    const std::string& getCPU() const;
+    const std::string& getOS() const;
+
+private:
+    /// Skip the left whitespaces of the input string
+    ///
+    /// \param input_str The input string
+    /// \param input_iterator From which the skipping started
+    void skipLeftSpaces(const std::string& input_str,
+                        std::string::const_iterator& input_iterator);
+
+    /// Helper template function for toWire()
+    ///
+    /// \param outputer Where to write data in
+    template <typename T>
+    void toWireHelper(T& outputer) const {
+        outputer.writeUint8(cpu_.size());
+        outputer.writeData(cpu_.c_str(), cpu_.size());
+
+        outputer.writeUint8(os_.size());
+        outputer.writeData(os_.c_str(), os_.size());
+    }
+
+    std::string cpu_;
+    std::string os_;
+};
+
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/minfo_14.cc b/src/lib/dns/rdata/generic/minfo_14.cc
new file mode 100644
index 0000000..aa5272c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.cc
@@ -0,0 +1,156 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string>
+#include <sstream>
+
+#include <util/buffer.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \c minfo_str must be formatted as follows:
+/// \code <rmailbox name> <emailbox name>
+/// \endcode
+/// where both fields must represent a valid domain name.
+///
+/// An example of valid string is:
+/// \code "rmail.example.com. email.example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// \exception InvalidRdataText The number of RDATA fields (must be 2) is
+/// incorrect.
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the string is invalid.
+MINFO::MINFO(const std::string& minfo_str) :
+    // We cannot construct both names in the initialization list due to the
+    // necessary text processing, so we have to initialize them with a dummy
+    // name and replace them later.
+    rmailbox_(Name::ROOT_NAME()), emailbox_(Name::ROOT_NAME())
+{
+    istringstream iss(minfo_str);
+    string rmailbox_str, emailbox_str;
+    iss >> rmailbox_str >> emailbox_str;
+
+    // Validation: A valid MINFO RR must have exactly two fields.
+    if (iss.bad() || iss.fail()) {
+        isc_throw(InvalidRdataText, "Invalid MINFO text: " << minfo_str);
+    }
+    if (!iss.eof()) {
+        isc_throw(InvalidRdataText, "Invalid MINFO text (redundant field): "
+                  << minfo_str);
+    }
+
+    rmailbox_ = Name(rmailbox_str);
+    emailbox_ = Name(emailbox_str);
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// This constructor doesn't check the validity of the second parameter (rdata
+/// length) for parsing.
+/// If necessary, the caller will check consistency.
+///
+/// \exception std::bad_alloc Memory allocation for names fails.
+/// \exception Other The constructor of the \c Name class will throw if the
+/// names in the wire is invalid.
+MINFO::MINFO(InputBuffer& buffer, size_t) :
+    rmailbox_(buffer), emailbox_(buffer)
+{}
+
+/// \brief Copy constructor.
+///
+/// \exception std::bad_alloc Memory allocation fails in copying internal
+/// member variables (this should be very rare).
+MINFO::MINFO(const MINFO& other) :
+    Rdata(), rmailbox_(other.rmailbox_), emailbox_(other.emailbox_)
+{}
+
+/// \brief Convert the \c MINFO to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c MINFO(const std::string&))).
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \return A \c string object that represents the \c MINFO object.
+std::string
+MINFO::toText() const {
+    return (rmailbox_.toText() + " " + emailbox_.toText());
+}
+
+/// \brief Render the \c MINFO in the wire format without name compression.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+MINFO::toWire(OutputBuffer& buffer) const {
+    rmailbox_.toWire(buffer);
+    emailbox_.toWire(buffer);
+}
+
+MINFO&
+MINFO::operator=(const MINFO& source) {
+    rmailbox_ = source.rmailbox_;
+    emailbox_ = source.emailbox_;
+
+    return (*this);
+}
+
+/// \brief Render the \c MINFO in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC3597, TYPE MINFO is "well-known", the rmailbox and
+/// emailbox fields (domain names) will be compressed.
+///
+/// \exception std::bad_alloc Internal resource allocation fails.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+MINFO::toWire(AbstractMessageRenderer& renderer) const {
+    renderer.writeName(rmailbox_);
+    renderer.writeName(emailbox_);
+}
+
+/// \brief Compare two instances of \c MINFO RDATA.
+///
+/// See documentation in \c Rdata.
+int
+MINFO::compare(const Rdata& other) const {
+    const MINFO& other_minfo = dynamic_cast<const MINFO&>(other);
+
+    const int cmp = compareNames(rmailbox_, other_minfo.rmailbox_);
+    if (cmp != 0) {
+        return (cmp);
+    }
+    return (compareNames(emailbox_, other_minfo.emailbox_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/minfo_14.h b/src/lib/dns/rdata/generic/minfo_14.h
new file mode 100644
index 0000000..f3ee1d0
--- /dev/null
+++ b/src/lib/dns/rdata/generic/minfo_14.h
@@ -0,0 +1,82 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::generic::MINFO class represents the MINFO RDATA as
+/// defined in RFC1035.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// MINFO RDATA.
+class MINFO : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Define the assignment operator.
+    ///
+    /// \exception std::bad_alloc Memory allocation fails in copying
+    /// internal member variables (this should be very rare).
+    MINFO& operator=(const MINFO& source);
+
+    /// \brief Return the value of the rmailbox field.
+    ///
+    /// \exception std::bad_alloc If resource allocation for the returned
+    /// \c Name fails.
+    ///
+    /// \note
+    /// Unlike the case of some other RDATA classes (such as
+    /// \c NS::getNSName()), this method constructs a new \c Name object
+    /// and returns it, instead of returning a reference to a \c Name object
+    /// internally maintained in the class (which is a private member).
+    /// This is based on the observation that this method will be rarely
+    /// used and even when it's used it will not be in a performance context
+    /// (for example, a recursive resolver won't need this field in its
+    /// resolution process).  By returning a new object we have flexibility
+    /// of changing the internal representation without the risk of changing
+    /// the interface or method property.
+    /// The same note applies to the \c getEmailbox() method.
+    Name getRmailbox() const { return (rmailbox_); }
+
+    /// \brief Return the value of the emailbox field.
+    ///
+    /// \exception std::bad_alloc If resource allocation for the returned
+    /// \c Name fails.
+    Name getEmailbox() const { return (emailbox_); }
+
+private:
+    Name rmailbox_;
+    Name emailbox_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/naptr_35.cc b/src/lib/dns/rdata/generic/naptr_35.cc
new file mode 100644
index 0000000..129bf6c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.cc
@@ -0,0 +1,220 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+
+#include <string>
+
+#include <boost/lexical_cast.hpp>
+
+#include <exceptions/exceptions.h>
+
+#include <dns/character_string.h>
+#include <dns/name.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace boost;
+using namespace isc::util;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+namespace {
+/// Skip the left whitespaces of the input string
+///
+/// \param input_str The input string
+/// \param input_iterator From which the skipping started
+void
+skipLeftSpaces(const std::string& input_str,
+               std::string::const_iterator& input_iterator)
+{
+    if (input_iterator >= input_str.end()) {
+        isc_throw(InvalidRdataText,
+                  "Invalid NAPTR text format, field is missing.");
+    }
+
+    if (!isspace(*input_iterator)) {
+        isc_throw(InvalidRdataText,
+            "Invalid NAPTR text format, fields are not separated by space.");
+    }
+    // Skip white spaces
+    while (input_iterator < input_str.end() && isspace(*input_iterator)) {
+        ++input_iterator;
+    }
+}
+
+} // Anonymous namespace
+
+NAPTR::NAPTR(InputBuffer& buffer, size_t len):
+    replacement_(".")
+{
+    order_ = buffer.readUint16();
+    preference_ = buffer.readUint16();
+
+    flags_ = getNextCharacterString(buffer, len);
+    services_ = getNextCharacterString(buffer, len);
+    regexp_ = getNextCharacterString(buffer, len);
+    replacement_ = Name(buffer);
+}
+
+NAPTR::NAPTR(const std::string& naptr_str):
+    replacement_(".")
+{
+    istringstream iss(naptr_str);
+    uint16_t order;
+    uint16_t preference;
+
+    iss >> order >> preference;
+
+    if (iss.bad() || iss.fail()) {
+        isc_throw(InvalidRdataText, "Invalid NAPTR text format");
+    }
+
+    order_ = order;
+    preference_ = preference;
+
+    string::const_iterator input_iterator = naptr_str.begin() + iss.tellg();
+
+    skipLeftSpaces(naptr_str, input_iterator);
+
+    flags_ = getNextCharacterString(naptr_str, input_iterator);
+
+    skipLeftSpaces(naptr_str, input_iterator);
+
+    services_ = getNextCharacterString(naptr_str, input_iterator);
+
+    skipLeftSpaces(naptr_str, input_iterator);
+
+    regexp_ = getNextCharacterString(naptr_str, input_iterator);
+
+    skipLeftSpaces(naptr_str, input_iterator);
+
+    if (input_iterator < naptr_str.end()) {
+        string replacementStr(input_iterator, naptr_str.end());
+
+        replacement_ = Name(replacementStr);
+    } else {
+        isc_throw(InvalidRdataText,
+                  "Invalid NAPTR text format, replacement field is missing");
+    }
+}
+
+NAPTR::NAPTR(const NAPTR& naptr):
+    Rdata(), order_(naptr.order_), preference_(naptr.preference_),
+    flags_(naptr.flags_), services_(naptr.services_), regexp_(naptr.regexp_),
+    replacement_(naptr.replacement_)
+{
+}
+
+void
+NAPTR::toWire(OutputBuffer& buffer) const {
+    toWireHelper(buffer);
+}
+
+void
+NAPTR::toWire(AbstractMessageRenderer& renderer) const {
+    toWireHelper(renderer);
+}
+
+string
+NAPTR::toText() const {
+    string result;
+    result += lexical_cast<string>(order_);
+    result += " ";
+    result += lexical_cast<string>(preference_);
+    result += " \"";
+    result += flags_;
+    result += "\" \"";
+    result += services_;
+    result += "\" \"";
+    result += regexp_;
+    result += "\" ";
+    result += replacement_.toText();
+    return (result);
+}
+
+int
+NAPTR::compare(const Rdata& other) const {
+    const NAPTR other_naptr = dynamic_cast<const NAPTR&>(other);
+
+    if (order_ < other_naptr.order_) {
+        return (-1);
+    } else if (order_ > other_naptr.order_) {
+        return (1);
+    }
+
+    if (preference_ < other_naptr.preference_) {
+        return (-1);
+    } else if (preference_ > other_naptr.preference_) {
+        return (1);
+    }
+
+    if (flags_ < other_naptr.flags_) {
+        return (-1);
+    } else if (flags_ > other_naptr.flags_) {
+        return (1);
+    }
+
+    if (services_ < other_naptr.services_) {
+        return (-1);
+    } else if (services_ > other_naptr.services_) {
+        return (1);
+    }
+
+    if (regexp_ < other_naptr.regexp_) {
+        return (-1);
+    } else if (regexp_ > other_naptr.regexp_) {
+        return (1);
+    }
+
+    return (compareNames(replacement_, other_naptr.replacement_));
+}
+
+uint16_t
+NAPTR::getOrder() const {
+    return (order_);
+}
+
+uint16_t
+NAPTR::getPreference() const {
+    return (preference_);
+}
+
+const std::string&
+NAPTR::getFlags() const {
+    return (flags_);
+}
+
+const std::string&
+NAPTR::getServices() const {
+    return (services_);
+}
+
+const std::string&
+NAPTR::getRegexp() const {
+    return (regexp_);
+}
+
+const Name&
+NAPTR::getReplacement() const {
+    return (replacement_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/naptr_35.h b/src/lib/dns/rdata/generic/naptr_35.h
new file mode 100644
index 0000000..ca16b3c
--- /dev/null
+++ b/src/lib/dns/rdata/generic/naptr_35.h
@@ -0,0 +1,83 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <util/buffer.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c NAPTR class represents the NAPTR rdata defined in
+/// RFC2915, RFC2168 and RFC3403
+///
+/// This class implements the basic interfaces inherited from the
+/// \c rdata::Rdata class, and provides accessors specific to the
+/// NAPTR rdata.
+class NAPTR : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    // NAPTR specific methods
+    uint16_t getOrder() const;
+    uint16_t getPreference() const;
+    const std::string& getFlags() const;
+    const std::string& getServices() const;
+    const std::string& getRegexp() const;
+    const Name& getReplacement() const;
+private:
+    /// Helper template function for toWire()
+    ///
+    /// \param outputer The buffer or renderer the wire-format data is written to
+    template <typename T>
+    void toWireHelper(T& outputer) const {
+        outputer.writeUint16(order_);
+        outputer.writeUint16(preference_);
+
+        outputer.writeUint8(flags_.size());
+        outputer.writeData(flags_.c_str(), flags_.size());
+
+        outputer.writeUint8(services_.size());
+        outputer.writeData(services_.c_str(), services_.size());
+
+        outputer.writeUint8(regexp_.size());
+        outputer.writeData(regexp_.c_str(), regexp_.size());
+
+        replacement_.toWire(outputer);
+    }
+
+    uint16_t order_;
+    uint16_t preference_;
+    std::string flags_;
+    std::string services_;
+    std::string regexp_;
+    Name replacement_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/generic/nsec_47.cc b/src/lib/dns/rdata/generic/nsec_47.cc
index 93b8b5f..4723c23 100644
--- a/src/lib/dns/rdata/generic/nsec_47.cc
+++ b/src/lib/dns/rdata/generic/nsec_47.cc
@@ -178,6 +178,11 @@ NSEC::toWire(AbstractMessageRenderer& renderer) const {
     renderer.writeData(&impl_->typebits_[0], impl_->typebits_.size());
 }
 
+const Name&
+NSEC::getNextName() const {
+    return (impl_->nextname_);
+}
+
 int
 NSEC::compare(const Rdata& other) const {
     const NSEC& other_nsec = dynamic_cast<const NSEC&>(other);
diff --git a/src/lib/dns/rdata/generic/nsec_47.h b/src/lib/dns/rdata/generic/nsec_47.h
index b86a25b..005dd3a 100644
--- a/src/lib/dns/rdata/generic/nsec_47.h
+++ b/src/lib/dns/rdata/generic/nsec_47.h
@@ -38,6 +38,16 @@ public:
     // END_COMMON_MEMBERS
     NSEC& operator=(const NSEC& source);
     ~NSEC();
+
+    // specialized methods
+
+    /// Return the next domain name.
+    ///
+    /// This method never throws an exception: it returns a reference to
+    /// an internal member without copying it.
+    /// \return The next domain name field in the form of \c Name object.
+    const Name& getNextName() const;
+
 private:
     NSECImpl* impl_;
 };
diff --git a/src/lib/dns/rdata/generic/rp_17.cc b/src/lib/dns/rdata/generic/rp_17.cc
index b8b2ba2..781b55d 100644
--- a/src/lib/dns/rdata/generic/rp_17.cc
+++ b/src/lib/dns/rdata/generic/rp_17.cc
@@ -24,6 +24,7 @@
 
 using namespace std;
 using namespace isc::dns;
+using namespace isc::util;
 
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.cc b/src/lib/dns/rdata/generic/rrsig_46.cc
index 0c82406..59ff030 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.cc
+++ b/src/lib/dns/rdata/generic/rrsig_46.cc
@@ -243,5 +243,10 @@ RRSIG::compare(const Rdata& other) const {
     }
 }
 
+const RRType&
+RRSIG::typeCovered() const {
+    return (impl_->covered_);
+}
+
 // END_RDATA_NAMESPACE
 // END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/rrsig_46.h b/src/lib/dns/rdata/generic/rrsig_46.h
index 19acc40..b32c17f 100644
--- a/src/lib/dns/rdata/generic/rrsig_46.h
+++ b/src/lib/dns/rdata/generic/rrsig_46.h
@@ -38,6 +38,9 @@ public:
     // END_COMMON_MEMBERS
     RRSIG& operator=(const RRSIG& source);
     ~RRSIG();
+
+    // specialized methods
+    const RRType& typeCovered() const;
 private:
     RRSIGImpl* impl_;
 };
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 7ecd84f..e473bca 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,6 +106,12 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
     renderer.writeData(numdata_, sizeof(numdata_));
 }
 
+Serial
+SOA::getSerial() const {
+    InputBuffer b(numdata_, sizeof(numdata_));
+    return (Serial(b.readUint32()));
+}
+
 string
 SOA::toText() const {
     InputBuffer b(numdata_, sizeof(numdata_));
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 3f6185e..2c180b2 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -18,6 +18,7 @@
 
 #include <dns/name.h>
 #include <dns/rdata.h>
+#include <dns/serial.h>
 
 // BEGIN_ISC_NAMESPACE
 
@@ -34,6 +35,8 @@ public:
     SOA(const Name& mname, const Name& rname, uint32_t serial,
         uint32_t refresh, uint32_t retry, uint32_t expire,
         uint32_t minimum);
+    /// \brief Returns the serial stored in the SOA.
+    Serial getSerial() const;
 private:
     /// Note: this is a prototype version; we may reconsider
     /// this representation later.
diff --git a/src/lib/dns/rdata/generic/spf_99.cc b/src/lib/dns/rdata/generic/spf_99.cc
new file mode 100644
index 0000000..aa3e4a1
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.cc
@@ -0,0 +1,131 @@
+// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+#include <vector>
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class is provided by
+/// a copy of instantiated TXTLikeImpl class common to both TXT and SPF.
+
+#include <dns/rdata/generic/detail/txt_like.h>
+
+/// \brief The assignment operator
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+SPF&
+SPF::operator=(const SPF& source) {
+    if (impl_ == source.impl_) {
+        return (*this);
+    }
+
+    SPFImpl* newimpl = new SPFImpl(*source.impl_);
+    delete impl_;
+    impl_ = newimpl;
+
+    return (*this);
+}
+
+/// \brief The destructor
+SPF::~SPF() {
+    delete impl_;
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(InputBuffer& buffer, size_t rdata_len) :
+    impl_(new SPFImpl(buffer, rdata_len))
+{}
+
+/// \brief Constructor from string.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(const std::string& txtstr) :
+    impl_(new SPFImpl(txtstr))
+{}
+
+/// \brief Copy constructor
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+SPF::SPF(const SPF& other) :
+    Rdata(), impl_(new SPFImpl(*other.impl_))
+{}
+
+/// \brief Render the \c SPF in the wire format to an OutputBuffer object
+///
+/// \return is the return of the corresponding implementation method.
+void
+SPF::toWire(OutputBuffer& buffer) const {
+    impl_->toWire(buffer);
+}
+
+/// \brief Render the \c SPF in the wire format to an AbstractMessageRenderer
+/// object
+///
+/// \return is the return of the corresponding implementation method.
+void
+SPF::toWire(AbstractMessageRenderer& renderer) const {
+    impl_->toWire(renderer);
+}
+
+/// \brief Convert the \c SPF to a string.
+///
+/// \return is the return of the corresponding implementation method.
+string
+SPF::toText() const {
+    return (impl_->toText());
+}
+
+/// \brief Compare two instances of \c SPF RDATA.
+///
+/// This method compares \c this and the \c other \c SPF objects.
+///
+/// This method is expected to be used in a polymorphic way, and the
+/// parameter to compare against is therefore of the abstract \c Rdata class.
+/// However, comparing two \c Rdata objects of different RR types
+/// is meaningless, and \c other must point to a \c SPF object;
+/// otherwise, the standard \c bad_cast exception will be thrown.
+///
+/// \param other the right-hand operand to compare against.
+/// \return is the return of the corresponding implementation method.
+int
+SPF::compare(const Rdata& other) const {
+    const SPF& other_txt = dynamic_cast<const SPF&>(other);
+
+    return (impl_->compare(*other_txt.impl_));
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/spf_99.h b/src/lib/dns/rdata/generic/spf_99.h
new file mode 100644
index 0000000..04ac99b
--- /dev/null
+++ b/src/lib/dns/rdata/generic/spf_99.h
@@ -0,0 +1,78 @@
+// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
+/// \brief \c rdata::SPF class represents the SPF RDATA as defined %in
+/// RFC4408.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class. The semantics of the class is provided by
+/// a copy of instantiated TXTLikeImpl class common to both TXT and SPF.
+class SPF : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Assignment operator.
+    ///
+    /// It internally allocates a resource, and if it fails a corresponding
+    /// standard exception will be thrown.
+    /// This operator never throws an exception otherwise.
+    ///
+    /// This operator provides the strong exception guarantee: When an
+    /// exception is thrown the content of the assignment target will be
+    /// intact.
+    SPF& operator=(const SPF& source);
+
+    /// \brief The destructor.
+    ~SPF();
+
+    ///
+    /// Specialized methods
+    ///
+
+    /// \brief Return a reference to the data strings
+    ///
+    /// This method never throws an exception.
+    const std::vector<std::vector<uint8_t> >& getString() const;
+
+private:
+    typedef TXTLikeImpl<SPF, 99> SPFImpl;
+    SPFImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables: 
+// mode: c++
+// End: 
diff --git a/src/lib/dns/rdata/generic/txt_16.cc b/src/lib/dns/rdata/generic/txt_16.cc
index ac2ba8a..418bc05 100644
--- a/src/lib/dns/rdata/generic/txt_16.cc
+++ b/src/lib/dns/rdata/generic/txt_16.cc
@@ -30,130 +30,57 @@ using namespace isc::util;
 // BEGIN_ISC_NAMESPACE
 // BEGIN_RDATA_NAMESPACE
 
-TXT::TXT(InputBuffer& buffer, size_t rdata_len) {
-    if (rdata_len > MAX_RDLENGTH) {
-        isc_throw(InvalidRdataLength, "RDLENGTH too large: " << rdata_len);
-    }
+#include <dns/rdata/generic/detail/txt_like.h>
 
-    if (rdata_len == 0) {       // note that this couldn't happen in the loop.
-        isc_throw(DNSMessageFORMERR,
-                  "Error in parsing TXT RDATA: 0-length character string");
+TXT&
+TXT::operator=(const TXT& source) {
+    if (impl_ == source.impl_) {
+        return (*this);
     }
 
-    do {
-        const uint8_t len = buffer.readUint8();
-        if (rdata_len < len + 1) {
-            isc_throw(DNSMessageFORMERR,
-                      "Error in parsing TXT RDATA: character string length "
-                      "is too large: " << static_cast<int>(len));
-        }
-        vector<uint8_t> data(len + 1);
-        data[0] = len;
-        buffer.readData(&data[0] + 1, len);
-        string_list_.push_back(data);
-
-        rdata_len -= (len + 1);
-    } while (rdata_len > 0);
-}
-
-TXT::TXT(const std::string& txtstr) {
-    // TBD: this is a simple, incomplete implementation that only supports
-    // a single character-string.
+    TXTImpl* newimpl = new TXTImpl(*source.impl_);
+    delete impl_;
+    impl_ = newimpl;
 
-    size_t length = txtstr.size();
-    size_t pos_begin = 0;
-
-    if (length > 1 && txtstr[0] == '"' && txtstr[length - 1] == '"') {
-        pos_begin = 1;
-        length -= 2;
-    }
+    return (*this);
+}
 
-    if (length > MAX_CHARSTRING_LEN) {
-        isc_throw(CharStringTooLong, "TXT RDATA construction from text: "
-                  "string length is too long: " << length);
-    }
+TXT::~TXT() {
+    delete impl_;
+}
 
-    // TBD: right now, we don't support escaped characters
-    if (txtstr.find('\\') != string::npos) {
-        isc_throw(InvalidRdataText, "TXT RDATA from text: "
-                  "escaped character is currently not supported: " << txtstr);
-    }
+TXT::TXT(InputBuffer& buffer, size_t rdata_len) :
+    impl_(new TXTImpl(buffer, rdata_len))
+{}
 
-    vector<uint8_t> data;
-    data.reserve(length + 1);
-    data.push_back(length);
-    data.insert(data.end(), txtstr.begin() + pos_begin,
-                txtstr.begin() + pos_begin + length);
-    string_list_.push_back(data);
-}
+TXT::TXT(const std::string& txtstr) :
+    impl_(new TXTImpl(txtstr))
+{}
 
 TXT::TXT(const TXT& other) :
-    Rdata(), string_list_(other.string_list_)
+    Rdata(), impl_(new TXTImpl(*other.impl_))
 {}
 
 void
 TXT::toWire(OutputBuffer& buffer) const {
-    for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
-         it != string_list_.end();
-         ++it)
-    {
-        buffer.writeData(&(*it)[0], (*it).size());
-    }
+    impl_->toWire(buffer);
 }
 
 void
 TXT::toWire(AbstractMessageRenderer& renderer) const {
-    for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
-         it != string_list_.end();
-         ++it)
-    {
-        renderer.writeData(&(*it)[0], (*it).size());
-    }
+    impl_->toWire(renderer);
 }
 
 string
 TXT::toText() const {
-    string s;
-
-    // XXX: this implementation is not entirely correct.  for example, it
-    // should escape double-quotes if they appear in the character string.
-    for (vector<vector<uint8_t> >::const_iterator it = string_list_.begin();
-         it != string_list_.end();
-         ++it)
-    {
-        if (!s.empty()) {
-            s.push_back(' ');
-        }
-        s.push_back('"');
-        s.insert(s.end(), (*it).begin() + 1, (*it).end());
-        s.push_back('"');
-    }
-
-    return (s);
+    return (impl_->toText());
 }
 
 int
 TXT::compare(const Rdata& other) const {
     const TXT& other_txt = dynamic_cast<const TXT&>(other);
 
-    // This implementation is not efficient.  Revisit this (TBD).
-    OutputBuffer this_buffer(0);
-    toWire(this_buffer);
-    size_t this_len = this_buffer.getLength();
-
-    OutputBuffer other_buffer(0);
-    other_txt.toWire(other_buffer);
-    const size_t other_len = other_buffer.getLength();
-
-    const size_t cmplen = min(this_len, other_len);
-    const int cmp = memcmp(this_buffer.getData(), other_buffer.getData(),
-                           cmplen);
-    if (cmp != 0) {
-        return (cmp);
-    } else {
-        return ((this_len == other_len) ? 0 :
-                (this_len < other_len) ? -1 : 1);
-    }
+    return (impl_->compare(*other_txt.impl_));
 }
 
 // END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/generic/txt_16.h b/src/lib/dns/rdata/generic/txt_16.h
index b4c791f..d99d69b 100644
--- a/src/lib/dns/rdata/generic/txt_16.h
+++ b/src/lib/dns/rdata/generic/txt_16.h
@@ -28,14 +28,19 @@
 
 // BEGIN_RDATA_NAMESPACE
 
+template<class Type, uint16_t typeCode> class TXTLikeImpl;
+
 class TXT : public Rdata {
 public:
     // BEGIN_COMMON_MEMBERS
     // END_COMMON_MEMBERS
+
+    TXT& operator=(const TXT& source);
+    ~TXT();
+
 private:
-    /// Note: this is a prototype version; we may reconsider
-    /// this representation later.
-    std::vector<std::vector<uint8_t> > string_list_;
+    typedef TXTLikeImpl<TXT, 16> TXTImpl;
+    TXTImpl* impl_;
 };
 
 // END_RDATA_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.cc b/src/lib/dns/rdata/in_1/dhcid_49.cc
new file mode 100644
index 0000000..f0c4aca
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.cc
@@ -0,0 +1,145 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <stdint.h>
+#include <string.h>
+
+#include <string>
+
+#include <exceptions/exceptions.h>
+
+#include <util/buffer.h>
+#include <util/encode/base64.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief Constructor from string.
+///
+/// \param dhcid_str A base-64 representation of the DHCID binary data.
+/// The data is considered to be opaque, but a sanity check is performed.
+///
+/// <b>Exceptions</b>
+///
+/// \c dhcid_str must be a valid  BASE-64 string, otherwise an exception
+/// of class \c isc::BadValue will be thrown;
+/// the binary data should consist of at least 3 octets as per RFC4701:
+///           < 2 octets >    Identifier type code
+///           < 1 octet >     Digest type code
+///           < n octets >    Digest (length depends on digest type)
+/// If the data is less than 3 octets (i.e. it cannot contain id type code and
+/// digest type code), an exception of class \c InvalidRdataLength is thrown.
+DHCID::DHCID(const string& dhcid_str) {
+    istringstream iss(dhcid_str);
+    stringbuf digestbuf;
+
+    iss >> &digestbuf;
+    isc::util::encode::decodeBase64(digestbuf.str(), digest_);
+
+    // RFC4701 states DNS software should consider the RDATA section to
+    // be opaque, but there must be at least three bytes in the data:
+    // < 2 octets >    Identifier type code
+    // < 1 octet >     Digest type code
+    if (digest_.size() < 3) {
+        isc_throw(InvalidRdataLength, "DHCID length " << digest_.size() <<
+                  " too short, need at least 3 bytes");
+    }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes
+///
+/// <b>Exceptions</b>
+/// \c InvalidRdataLength is thrown if \c rdata_len is less than the minimum of 3 octets
+DHCID::DHCID(InputBuffer& buffer, size_t rdata_len) {
+    if (rdata_len < 3) {
+        isc_throw(InvalidRdataLength, "DHCID length " << rdata_len <<
+                  " too short, need at least 3 bytes");
+    }
+
+    digest_.resize(rdata_len);
+    buffer.readData(&digest_[0], rdata_len);
+}
+
+/// \brief The copy constructor.
+///
+/// This trivial copy constructor never throws an exception.
+DHCID::DHCID(const DHCID& other) : Rdata(), digest_(other.digest_)
+{}
+
+/// \brief Render the \c DHCID in the wire format.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+DHCID::toWire(OutputBuffer& buffer) const {
+    buffer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Render the \c DHCID in the wire format into a
+/// \c MessageRenderer object.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer in which the \c DHCID is to be stored.
+void
+DHCID::toWire(AbstractMessageRenderer& renderer) const {
+    renderer.writeData(&digest_[0], digest_.size());
+}
+
+/// \brief Convert the \c DHCID to a string.
+///
+/// This method returns a \c std::string object representing the \c DHCID.
+///
+/// \return A string representation of \c DHCID.
+string
+DHCID::toText() const {
+    return (isc::util::encode::encodeBase64(digest_));
+}
+
+/// \brief Compare two instances of \c DHCID RDATA.
+///
+/// See documentation in \c Rdata.
+int
+DHCID::compare(const Rdata& other) const {
+    const DHCID& other_dhcid = dynamic_cast<const DHCID&>(other);
+
+    size_t this_len = digest_.size();
+    size_t other_len = other_dhcid.digest_.size();
+    size_t cmplen = min(this_len, other_len);
+    int cmp = memcmp(&digest_[0], &other_dhcid.digest_[0], cmplen);
+    if (cmp != 0) {
+        return (cmp);
+    } else {
+        return ((this_len == other_len) ? 0 : (this_len < other_len) ? -1 : 1);
+    }
+}
+
+/// \brief Accessor method to get the DHCID digest
+///
+/// \return A reference to the binary DHCID data
+const std::vector<uint8_t>&
+DHCID::getDigest() const {
+    return (digest_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/dhcid_49.h b/src/lib/dns/rdata/in_1/dhcid_49.h
new file mode 100644
index 0000000..90f5fab
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/dhcid_49.h
@@ -0,0 +1,58 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <string>
+#include <vector>
+
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+/// \brief \c rdata::DHCID class represents the DHCID RDATA as defined %in
+/// RFC4701.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// DHCID RDATA.
+class DHCID : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Return the digest.
+    ///
+    /// This method never throws an exception.
+    const std::vector<uint8_t>& getDigest() const;
+
+private:
+    /// \brief Private data representation
+    ///
+    /// Opaque data at least 3 octets long as per RFC4701.
+    ///
+    std::vector<uint8_t> digest_;
+};
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables: 
+// mode: c++
+// End: 
diff --git a/src/lib/dns/rdata/in_1/srv_33.cc b/src/lib/dns/rdata/in_1/srv_33.cc
new file mode 100644
index 0000000..93b5d4d
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.cc
@@ -0,0 +1,245 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <iostream>
+#include <sstream>
+
+#include <boost/lexical_cast.hpp>
+
+#include <util/buffer.h>
+#include <util/strutil.h>
+
+#include <dns/messagerenderer.h>
+#include <dns/name.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+
+using namespace std;
+using namespace isc::util;
+using namespace isc::util::str;
+
+// BEGIN_ISC_NAMESPACE
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl {
+    // straightforward representation of SRV RDATA fields
+    SRVImpl(uint16_t priority, uint16_t weight, uint16_t port,
+           const Name& target) :
+        priority_(priority), weight_(weight), port_(port),
+        target_(target)
+    {}
+
+    uint16_t priority_;
+    uint16_t weight_;
+    uint16_t port_;
+    Name target_;
+};
+
+/// \brief Constructor from string.
+///
+/// \c srv_str must be formatted as follows:
+/// \code <Priority> <Weight> <Port> <Target>
+/// \endcode
+/// where
+/// - <Priority>, <Weight>, and <Port> are an unsigned 16-bit decimal
+///   integer.
+/// - <Target> is a valid textual representation of domain name.
+///
+/// An example of valid string is:
+/// \code "1 5 1500 example.com." \endcode
+///
+/// <b>Exceptions</b>
+///
+/// If <Target> is not a valid domain name, a corresponding exception from
+/// the \c Name class will be thrown;
+/// if %any of the other bullet points above is not met, an exception of
+/// class \c InvalidRdataText will be thrown.
+/// This constructor internally involves resource allocation, and if it fails
+/// a corresponding standard exception will be thrown.
+SRV::SRV(const string& srv_str) :
+    impl_(NULL)
+{
+    istringstream iss(srv_str);
+
+    try {
+        const int32_t priority = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t weight = tokenToNum<int32_t, 16>(getToken(iss));
+        const int32_t port = tokenToNum<int32_t, 16>(getToken(iss));
+        const Name targetname(getToken(iss));
+
+        if (!iss.eof()) {
+            isc_throw(InvalidRdataText, "Unexpected input for SRV RDATA: " <<
+                    srv_str);
+        }
+
+        impl_ = new SRVImpl(priority, weight, port, targetname);
+    } catch (const StringTokenError& ste) {
+        isc_throw(InvalidRdataText, "Invalid SRV text: " <<
+                  ste.what() << ": " << srv_str);
+    }
+}
+
+/// \brief Constructor from wire-format data.
+///
+/// When a read operation on \c buffer fails (e.g., due to a corrupted
+/// message) a corresponding exception from the \c InputBuffer class will
+/// be thrown.
+/// If the wire-format data does not end with a valid domain name,
+/// a corresponding exception from the \c Name class will be thrown.
+/// In addition, this constructor internally involves resource allocation,
+/// and if it fails a corresponding standard exception will be thrown.
+///
+/// According to RFC2782, the Target field must be a non compressed form
+/// of domain name.  But this implementation accepts a %SRV RR even if that
+/// field is compressed as suggested in RFC3597.
+///
+/// \param buffer A buffer storing the wire format data.
+/// \param rdata_len The length of the RDATA in bytes, normally expected
+/// to be the value of the RDLENGTH field of the corresponding RR.
+SRV::SRV(InputBuffer& buffer, size_t rdata_len) {
+    if (rdata_len < 6) {
+        isc_throw(InvalidRdataLength, "SRV too short");
+    }
+
+    uint16_t priority = buffer.readUint16();
+    uint16_t weight = buffer.readUint16();
+    uint16_t port = buffer.readUint16();
+    const Name targetname(buffer);
+
+    impl_ = new SRVImpl(priority, weight, port, targetname);
+}
+
+/// \brief The copy constructor.
+///
+/// It internally allocates a resource, and if it fails a corresponding
+/// standard exception will be thrown.
+/// This constructor never throws an exception otherwise.
+SRV::SRV(const SRV& source) :
+    Rdata(), impl_(new SRVImpl(*source.impl_))
+{}
+
+SRV&
+SRV::operator=(const SRV& source) {
+    if (impl_ == source.impl_) {
+        return (*this);
+    }
+
+    SRVImpl* newimpl = new SRVImpl(*source.impl_);
+    delete impl_;
+    impl_ = newimpl;
+
+    return (*this);
+}
+
+SRV::~SRV() {
+    delete impl_;
+}
+
+/// \brief Convert the \c SRV to a string.
+///
+/// The output of this method is formatted as described in the "from string"
+/// constructor (\c SRV(const std::string&))).
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+///
+/// \return A \c string object that represents the \c SRV object.
+string
+SRV::toText() const {
+    using namespace boost;
+    return (lexical_cast<string>(impl_->priority_) +
+        " " + lexical_cast<string>(impl_->weight_) +
+        " " + lexical_cast<string>(impl_->port_) +
+        " " + impl_->target_.toText());
+}
+
+/// \brief Render the \c SRV in the wire format without name compression.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param buffer An output buffer to store the wire data.
+void
+SRV::toWire(OutputBuffer& buffer) const {
+    buffer.writeUint16(impl_->priority_);
+    buffer.writeUint16(impl_->weight_);
+    buffer.writeUint16(impl_->port_);
+    impl_->target_.toWire(buffer);
+}
+
+/// \brief Render the \c SRV in the wire format with taking into account
+/// compression.
+///
+/// As specified in RFC2782, the Target field (a domain name) will not be
+/// compressed.  However, the domain name could be a target of compression
+/// of other compressible names (though pretty unlikely), the offset
+/// information of the algorithm name may be recorded in \c renderer.
+///
+/// If internal resource allocation fails, a corresponding
+/// standard exception will be thrown.
+/// This method never throws an exception otherwise.
+///
+/// \param renderer DNS message rendering context that encapsulates the
+/// output buffer and name compression information.
+void
+SRV::toWire(AbstractMessageRenderer& renderer) const {
+    renderer.writeUint16(impl_->priority_);
+    renderer.writeUint16(impl_->weight_);
+    renderer.writeUint16(impl_->port_);
+    renderer.writeName(impl_->target_, false);
+}
+
+/// \brief Compare two instances of \c SRV RDATA.
+///
+/// See documentation in \c Rdata.
+int
+SRV::compare(const Rdata& other) const {
+    const SRV& other_srv = dynamic_cast<const SRV&>(other);
+
+    if (impl_->priority_ != other_srv.impl_->priority_) {
+        return (impl_->priority_ < other_srv.impl_->priority_ ? -1 : 1);
+    }
+    if (impl_->weight_ != other_srv.impl_->weight_) {
+        return (impl_->weight_ < other_srv.impl_->weight_ ? -1 : 1);
+    }
+    if (impl_->port_ != other_srv.impl_->port_) {
+        return (impl_->port_ < other_srv.impl_->port_ ? -1 : 1);
+    }
+
+    return (compareNames(impl_->target_, other_srv.impl_->target_));
+}
+
+uint16_t
+SRV::getPriority() const {
+    return (impl_->priority_);
+}
+
+uint16_t
+SRV::getWeight() const {
+    return (impl_->weight_);
+}
+
+uint16_t
+SRV::getPort() const {
+    return (impl_->port_);
+}
+
+const Name&
+SRV::getTarget() const {
+    return (impl_->target_);
+}
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
diff --git a/src/lib/dns/rdata/in_1/srv_33.h b/src/lib/dns/rdata/in_1/srv_33.h
new file mode 100644
index 0000000..32b7dc0
--- /dev/null
+++ b/src/lib/dns/rdata/in_1/srv_33.h
@@ -0,0 +1,93 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// BEGIN_HEADER_GUARD
+
+#include <stdint.h>
+
+#include <dns/name.h>
+#include <dns/rdata.h>
+
+// BEGIN_ISC_NAMESPACE
+
+// BEGIN_COMMON_DECLARATIONS
+// END_COMMON_DECLARATIONS
+
+// BEGIN_RDATA_NAMESPACE
+
+struct SRVImpl;
+
+/// \brief \c rdata::SRV class represents the SRV RDATA as defined %in
+/// RFC2782.
+///
+/// This class implements the basic interfaces inherited from the abstract
+/// \c rdata::Rdata class, and provides trivial accessors specific to the
+/// SRV RDATA.
+class SRV : public Rdata {
+public:
+    // BEGIN_COMMON_MEMBERS
+    // END_COMMON_MEMBERS
+
+    /// \brief Assignment operator.
+    ///
+    /// It internally allocates a resource, and if it fails a corresponding
+    /// standard exception will be thrown.
+    /// This operator never throws an exception otherwise.
+    ///
+    /// This operator provides the strong exception guarantee: When an
+    /// exception is thrown the content of the assignment target will be
+    /// intact.
+    SRV& operator=(const SRV& source);
+
+    /// \brief The destructor.
+    ~SRV();
+
+    ///
+    /// Specialized methods
+    ///
+
+    /// \brief Return the value of the priority field.
+    ///
+    /// This method never throws an exception.
+    uint16_t getPriority() const;
+
+    /// \brief Return the value of the weight field.
+    ///
+    /// This method never throws an exception.
+    uint16_t getWeight() const;
+
+    /// \brief Return the value of the port field.
+    ///
+    /// This method never throws an exception.
+    uint16_t getPort() const;
+
+    /// \brief Return the value of the target field.
+    ///
+    /// \return A reference to a \c Name class object corresponding to the
+    /// internal target name.
+    ///
+    /// This method never throws an exception.
+    const Name& getTarget() const;
+
+private:
+    SRVImpl* impl_;
+};
+
+// END_RDATA_NAMESPACE
+// END_ISC_NAMESPACE
+// END_HEADER_GUARD
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/rdata/template.cc b/src/lib/dns/rdata/template.cc
index d9f08ee..e85f82c 100644
--- a/src/lib/dns/rdata/template.cc
+++ b/src/lib/dns/rdata/template.cc
@@ -18,6 +18,7 @@
 #include <dns/messagerenderer.h>
 #include <dns/rdata.h>
 #include <dns/rdataclass.h>
+#include <dns/rrtype.h>
 
 using namespace std;
 using namespace isc::util;
diff --git a/src/lib/dns/rdatafields.h b/src/lib/dns/rdatafields.h
index e33bcd7..16880f0 100644
--- a/src/lib/dns/rdatafields.h
+++ b/src/lib/dns/rdatafields.h
@@ -296,7 +296,7 @@ public:
     /// as long as the \c RdataFields object is used.
     ///
     /// \param fields An array of \c FieldSpec entries.  This can be \c NULL.
-    /// \param nfields The number of entries of \c fields.
+    /// \param fields_length The total length of the \c fields.
     /// \param data A pointer to memory region for the entire RDATA.  This can
     /// be NULL.
     /// \param data_length The length of \c data in bytes.
diff --git a/src/lib/dns/rrset.h b/src/lib/dns/rrset.h
index 6c15b53..1586465 100644
--- a/src/lib/dns/rrset.h
+++ b/src/lib/dns/rrset.h
@@ -478,7 +478,7 @@ public:
 
     /// \brief Return the current \c Rdata corresponding to the rdata cursor.
     ///
-    /// \return A reference to an \c rdata::::Rdata object corresponding
+    /// \return A reference to an \c rdata::Rdata object corresponding
     /// to the rdata cursor.
     virtual const rdata::Rdata& getCurrent() const = 0;
 
diff --git a/src/lib/dns/rrtype-placeholder.h b/src/lib/dns/rrtype-placeholder.h
index 1cb028c..dad1b2b 100644
--- a/src/lib/dns/rrtype-placeholder.h
+++ b/src/lib/dns/rrtype-placeholder.h
@@ -22,6 +22,11 @@
 
 #include <exceptions/exceptions.h>
 
+// Solaris x86 defines DS in <sys/regset.h>, which gets pulled in by Boost
+#if defined(__sun) && defined(DS)
+# undef DS
+#endif
+
 namespace isc {
 namespace util {
 class InputBuffer;
diff --git a/src/lib/dns/serial.cc b/src/lib/dns/serial.cc
new file mode 100644
index 0000000..90bc242
--- /dev/null
+++ b/src/lib/dns/serial.cc
@@ -0,0 +1,76 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/serial.h>
+
+namespace isc {
+namespace dns {
+
+bool
+Serial::operator==(const Serial& other) const {
+    return (value_ == other.getValue());
+}
+
+bool
+Serial::operator!=(const Serial& other) const {
+    return (value_ != other.getValue());
+}
+
+bool
+Serial::operator<(const Serial& other) const {
+    uint32_t other_val = other.getValue();
+    bool result = false;
+    if (value_ < other_val) {
+        result = ((other_val - value_) <= MAX_SERIAL_INCREMENT);
+    } else if (other_val < value_) {
+        result = ((value_ - other_val) > MAX_SERIAL_INCREMENT);
+    }
+    return (result);
+}
+
+bool
+Serial::operator<=(const Serial& other) const {
+    return (operator==(other) || operator<(other));
+}
+
+bool
+Serial::operator>(const Serial& other) const {
+    return (!operator==(other) && !operator<(other));
+}
+
+bool
+Serial::operator>=(const Serial& other) const {
+    return (!operator<(other));
+}
+
+Serial
+Serial::operator+(uint32_t other_val) const {
+    uint64_t new_val = static_cast<uint64_t>(value_) +
+                       static_cast<uint64_t>(other_val);
+    return Serial(static_cast<uint32_t>(new_val % MAX_SERIAL_VALUE));
+}
+
+Serial
+Serial::operator+(const Serial& other) const {
+    return (operator+(other.getValue()));
+}
+
+std::ostream&
+operator<<(std::ostream& os, const Serial& serial) {
+    return (os << serial.getValue());
+}
+
+} // end namespace dns
+} // end namespace isc
+
diff --git a/src/lib/dns/serial.h b/src/lib/dns/serial.h
new file mode 100644
index 0000000..3549860
--- /dev/null
+++ b/src/lib/dns/serial.h
@@ -0,0 +1,155 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERIAL_H
+#define __SERIAL_H 1
+
+#include <stdint.h>
+#include <iostream>
+
+namespace isc {
+namespace dns {
+
+/// The maximum difference between two serial numbers. If the (plain uint32_t)
+/// difference between two serials is greater than this number, the smaller one
+/// is considered greater.
+const uint32_t MAX_SERIAL_INCREMENT = 2147483647;
+
+/// Maximum value a serial can have, used in + operator.
+const uint64_t MAX_SERIAL_VALUE = 4294967296ull;
+
+/// \brief This class defines DNS serial numbers and serial arithmetic.
+///
+/// DNS serial numbers are in essence unsigned 32-bit numbers, with one
+/// catch: they should be compared using sequence space arithmetic.
+/// So given that they are 32-bits; as soon as the difference between two
+/// serial numbers is greater than 2147483647 (2^31 - 1), the lower number
+/// (in plain comparison) is considered the higher one.
+///
+/// In order to do this as transparently as possible, these numbers are
+/// stored in the Serial class, which overrides the basic comparison operators.
+///
+/// In this specific context, these operations are called 'serial number
+/// arithmetic', and they are defined in RFC 1982.
+///
+/// \note RFC 1982 defines everything based on the value SERIAL_BITS. Since
+/// the serial number has a fixed length of 32 bits, the values we use are
+/// hard-coded, and not computed based on variable bit lengths.
+class Serial {
+public:
+    /// \brief Constructor with value
+    ///
+    /// \param value The uint32_t value of the serial
+    explicit Serial(uint32_t value) : value_(value) {}
+
+    /// \brief Copy constructor
+    Serial(const Serial& other) : value_(other.getValue()) {}
+
+    /// \brief Direct assignment from other Serial
+    ///
+    /// \param other The Serial to assign the value from
+    void operator=(const Serial& other) { value_ = other.getValue(); }
+
+    /// \brief Direct assignment from value
+    ///
+    /// \param value the uint32_t value to assign
+    void operator=(uint32_t value) { value_ = value; }
+
+    /// \brief Returns the uint32_t representation of this serial value
+    ///
+    /// \return The uint32_t value of this Serial
+    uint32_t getValue() const { return (value_); }
+
+    /// \brief Returns true if the serial values are equal
+    ///
+    /// \return True if the values are equal
+    bool operator==(const Serial& other) const;
+
+    /// \brief Returns true if the serial values are not equal
+    ///
+    /// \return True if the values are not equal
+    bool operator!=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is smaller than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than the given value
+    bool operator<(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// smaller than the other, according to serial arithmetic as described
+    /// in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than or equal to the given value
+    bool operator<=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is greater than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than the given value
+    bool operator>(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// greater than the other, according to serial arithmetic as described in
+    /// RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than or equal to the given value
+    bool operator>=(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other The Serial to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    ///
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other_val The uint32_t value to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(uint32_t other_val) const;
+
+private:
+    uint32_t value_;
+};
+
+/// \brief Helper operator for output streams, writes the value to the stream
+///
+/// \param os The ostream to write to
+/// \param serial The Serial to write
+/// \return the output stream
+std::ostream& operator<<(std::ostream& os, const Serial& serial);
+
+} // end namespace dns
+} // end namespace isc
+
+#endif // __SERIAL_H
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index 3a249c1..fc6c87c 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -29,28 +29,37 @@ run_unittests_SOURCES += rdata_unittest.h rdata_unittest.cc
 run_unittests_SOURCES += rdatafields_unittest.cc
 run_unittests_SOURCES += rdata_in_a_unittest.cc rdata_in_aaaa_unittest.cc
 run_unittests_SOURCES += rdata_ns_unittest.cc rdata_soa_unittest.cc
-run_unittests_SOURCES += rdata_txt_unittest.cc rdata_mx_unittest.cc
+run_unittests_SOURCES += rdata_txt_like_unittest.cc
+run_unittests_SOURCES += rdata_mx_unittest.cc
 run_unittests_SOURCES += rdata_ptr_unittest.cc rdata_cname_unittest.cc
 run_unittests_SOURCES += rdata_dname_unittest.cc
+run_unittests_SOURCES += rdata_afsdb_unittest.cc
 run_unittests_SOURCES += rdata_opt_unittest.cc
+run_unittests_SOURCES += rdata_dhcid_unittest.cc
 run_unittests_SOURCES += rdata_dnskey_unittest.cc
-run_unittests_SOURCES += rdata_ds_unittest.cc
+run_unittests_SOURCES += rdata_ds_like_unittest.cc
 run_unittests_SOURCES += rdata_nsec_unittest.cc
 run_unittests_SOURCES += rdata_nsec3_unittest.cc
 run_unittests_SOURCES += rdata_nsecbitmap_unittest.cc
 run_unittests_SOURCES += rdata_nsec3param_unittest.cc
 run_unittests_SOURCES += rdata_rrsig_unittest.cc
 run_unittests_SOURCES += rdata_rp_unittest.cc
+run_unittests_SOURCES += rdata_srv_unittest.cc
+run_unittests_SOURCES += rdata_minfo_unittest.cc
 run_unittests_SOURCES += rdata_tsig_unittest.cc
+run_unittests_SOURCES += rdata_naptr_unittest.cc
+run_unittests_SOURCES += rdata_hinfo_unittest.cc
 run_unittests_SOURCES += rrset_unittest.cc rrsetlist_unittest.cc
 run_unittests_SOURCES += question_unittest.cc
 run_unittests_SOURCES += rrparamregistry_unittest.cc
 run_unittests_SOURCES += masterload_unittest.cc
 run_unittests_SOURCES += message_unittest.cc
+run_unittests_SOURCES += serial_unittest.cc
 run_unittests_SOURCES += tsig_unittest.cc
 run_unittests_SOURCES += tsigerror_unittest.cc
 run_unittests_SOURCES += tsigkey_unittest.cc
 run_unittests_SOURCES += tsigrecord_unittest.cc
+run_unittests_SOURCES += character_string_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 # We shouldn't need to include BOTAN_LDFLAGS here, but there
diff --git a/src/lib/dns/tests/character_string_unittest.cc b/src/lib/dns/tests/character_string_unittest.cc
new file mode 100644
index 0000000..5fed9eb
--- /dev/null
+++ b/src/lib/dns/tests/character_string_unittest.cc
@@ -0,0 +1,92 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+
+#include <gtest/gtest.h>
+
+#include <dns/rdata.h>
+#include <dns/tests/unittest_util.h>
+#include <dns/character_string.h>
+
+using isc::UnitTestUtil;
+
+using namespace std;
+using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::characterstr;
+using namespace isc::dns::rdata;
+
+namespace {
+
+class CharacterString {
+public:
+    CharacterString(const string& str){
+        string::const_iterator it = str.begin();
+        characterStr_ = getNextCharacterString(str, it);
+    }
+    const string& str() const { return characterStr_; }
+private:
+    string characterStr_;
+};
+
+TEST(CharacterStringTest, testNormalCase) {
+    CharacterString cstr1("foo");
+    EXPECT_EQ(string("foo"), cstr1.str());
+
+    // Test a <character-string> that is terminated by a space
+    CharacterString cstr2("foo bar");
+    EXPECT_EQ(string("foo"), cstr2.str());
+
+    // Test a <character-string> that is delimited by quotes
+    CharacterString cstr3("\"foo bar\"");
+    EXPECT_EQ(string("foo bar"), cstr3.str());
+
+    // Test a <character-string> that does not start with a quote but ends with one
+    CharacterString cstr4("foo\"");
+    EXPECT_EQ(string("foo\""), cstr4.str());
+}
+
+TEST(CharacterStringTest, testBadCase) {
+    // The <character-string> that started with quotes should also be ended
+    // with quotes
+    EXPECT_THROW(CharacterString cstr("\"foo"), InvalidRdataText);
+
+    // The string length cannot exceed 255 characters
+    string str;
+    for (int i = 0; i < 257; ++i) {
+        str += 'A';
+    }
+    EXPECT_THROW(CharacterString cstr(str), CharStringTooLong);
+}
+
+TEST(CharacterStringTest, testEscapeCharacter) {
+    CharacterString cstr1("foo\\bar");
+    EXPECT_EQ(string("foobar"), cstr1.str());
+
+    CharacterString cstr2("foo\\\\bar");
+    EXPECT_EQ(string("foo\\bar"), cstr2.str());
+
+    CharacterString cstr3("fo\\111bar");
+    EXPECT_EQ(string("foobar"), cstr3.str());
+
+    CharacterString cstr4("fo\\1112bar");
+    EXPECT_EQ(string("foo2bar"), cstr4.str());
+
+    // At least 3 digits must follow the '\' escape character
+    EXPECT_THROW(CharacterString cstr("foo\\98ar"), InvalidRdataText);
+    EXPECT_THROW(CharacterString cstr("foo\\9ar"), InvalidRdataText);
+    EXPECT_THROW(CharacterString cstr("foo\\98"), InvalidRdataText);
+}
+
+} // namespace
diff --git a/src/lib/dns/tests/message_unittest.cc b/src/lib/dns/tests/message_unittest.cc
index 6430626..f068791 100644
--- a/src/lib/dns/tests/message_unittest.cc
+++ b/src/lib/dns/tests/message_unittest.cc
@@ -118,16 +118,20 @@ protected:
     vector<unsigned char> received_data;
     vector<unsigned char> expected_data;
 
-    void factoryFromFile(Message& message, const char* datafile);
+    void factoryFromFile(Message& message, const char* datafile,
+                         Message::ParseOptions options =
+                         Message::PARSE_DEFAULT);
 };
 
 void
-MessageTest::factoryFromFile(Message& message, const char* datafile) {
+MessageTest::factoryFromFile(Message& message, const char* datafile,
+                             Message::ParseOptions options)
+{
     received_data.clear();
     UnitTestUtil::readWireData(datafile, received_data);
 
     InputBuffer buffer(&received_data[0], received_data.size());
-    message.fromWire(buffer);
+    message.fromWire(buffer, options);
 }
 
 TEST_F(MessageTest, headerFlag) {
@@ -175,7 +179,6 @@ TEST_F(MessageTest, headerFlag) {
     EXPECT_THROW(message_parse.setHeaderFlag(Message::HEADERFLAG_QR),
                  InvalidMessageOperation);
 }
-
 TEST_F(MessageTest, getEDNS) {
     EXPECT_FALSE(message_parse.getEDNS()); // by default EDNS isn't set
 
@@ -532,7 +535,46 @@ TEST_F(MessageTest, appendSection) {
     
 }
 
+TEST_F(MessageTest, parseHeader) {
+    received_data.clear();
+    UnitTestUtil::readWireData("message_fromWire1", received_data);
+
+    // parseHeader() isn't allowed in the render mode.
+    InputBuffer buffer(&received_data[0], received_data.size());
+    EXPECT_THROW(message_render.parseHeader(buffer), InvalidMessageOperation);
+
+    message_parse.parseHeader(buffer);
+    EXPECT_EQ(0x1035, message_parse.getQid());
+    EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
+    EXPECT_EQ(Rcode::NOERROR(), message_parse.getRcode());
+    EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_QR));
+    EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_AA));
+    EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_TC));
+    EXPECT_TRUE(message_parse.getHeaderFlag(Message::HEADERFLAG_RD));
+    EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_RA));
+    EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_AD));
+    EXPECT_FALSE(message_parse.getHeaderFlag(Message::HEADERFLAG_CD));
+    EXPECT_EQ(1, message_parse.getRRCount(Message::SECTION_QUESTION));
+    EXPECT_EQ(2, message_parse.getRRCount(Message::SECTION_ANSWER));
+    EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_AUTHORITY));
+    EXPECT_EQ(0, message_parse.getRRCount(Message::SECTION_ADDITIONAL));
+
+    // Only the header part should have been examined.
+    EXPECT_EQ(12, buffer.getPosition()); // 12 = size of the header section
+    EXPECT_TRUE(message_parse.beginQuestion() == message_parse.endQuestion());
+    EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ANSWER) ==
+                message_parse.endSection(Message::SECTION_ANSWER));
+    EXPECT_TRUE(message_parse.beginSection(Message::SECTION_AUTHORITY) ==
+                message_parse.endSection(Message::SECTION_AUTHORITY));
+    EXPECT_TRUE(message_parse.beginSection(Message::SECTION_ADDITIONAL) ==
+                message_parse.endSection(Message::SECTION_ADDITIONAL));
+}
+
 TEST_F(MessageTest, fromWire) {
+    // fromWire() isn't allowed in the render mode.
+    EXPECT_THROW(factoryFromFile(message_render, "message_fromWire1"),
+                 InvalidMessageOperation);
+
     factoryFromFile(message_parse, "message_fromWire1");
     EXPECT_EQ(0x1035, message_parse.getQid());
     EXPECT_EQ(Opcode::QUERY(), message_parse.getOpcode());
@@ -564,6 +606,87 @@ TEST_F(MessageTest, fromWire) {
     EXPECT_TRUE(it->isLast());
 }
 
+TEST_F(MessageTest, fromWireShortBuffer) {
+    // We trim a valid message (ending with an SOA RR) for one byte.
+    // fromWire() should throw an exception while parsing the trimmed RR.
+    UnitTestUtil::readWireData("message_fromWire22.wire", received_data);
+    InputBuffer buffer(&received_data[0], received_data.size() - 1);
+    EXPECT_THROW(message_parse.fromWire(buffer), InvalidBufferPosition);
+}
+
+TEST_F(MessageTest, fromWireCombineRRs) {
+    // This message contains 3 RRs in the answer section in the order of
+    // A, AAAA, A types.  fromWire() should combine the two A RRs into a
+    // single RRset by default.
+    factoryFromFile(message_parse, "message_fromWire19.wire");
+
+    RRsetIterator it = message_parse.beginSection(Message::SECTION_ANSWER);
+    RRsetIterator it_end = message_parse.endSection(Message::SECTION_ANSWER);
+    ASSERT_TRUE(it != it_end);
+    EXPECT_EQ(RRType::A(), (*it)->getType());
+    EXPECT_EQ(2, (*it)->getRdataCount());
+
+    ++it;
+    ASSERT_TRUE(it != it_end);
+    EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+    EXPECT_EQ(1, (*it)->getRdataCount());
+}
+
+// A helper function for a test pattern commonly used in several tests below.
+void
+preserveRRCheck(const Message& message, Message::Section section) {
+    RRsetIterator it = message.beginSection(section);
+    RRsetIterator it_end = message.endSection(section);
+    ASSERT_TRUE(it != it_end);
+    EXPECT_EQ(RRType::A(), (*it)->getType());
+    EXPECT_EQ(1, (*it)->getRdataCount());
+    EXPECT_EQ("192.0.2.1", (*it)->getRdataIterator()->getCurrent().toText());
+
+    ++it;
+    ASSERT_TRUE(it != it_end);
+    EXPECT_EQ(RRType::AAAA(), (*it)->getType());
+    EXPECT_EQ(1, (*it)->getRdataCount());
+    EXPECT_EQ("2001:db8::1", (*it)->getRdataIterator()->getCurrent().toText());
+
+    ++it;
+    ASSERT_TRUE(it != it_end);
+    EXPECT_EQ(RRType::A(), (*it)->getType());
+    EXPECT_EQ(1, (*it)->getRdataCount());
+    EXPECT_EQ("192.0.2.2", (*it)->getRdataIterator()->getCurrent().toText());
+}
+
+TEST_F(MessageTest, fromWirePreserveAnswer) {
+    // Using the same data as the previous test, but specify the PRESERVE_ORDER
+    // option.  The received order of RRs should be preserved, and each RR
+    // should be stored in a single RRset.
+    factoryFromFile(message_parse, "message_fromWire19.wire",
+                    Message::PRESERVE_ORDER);
+    {
+        SCOPED_TRACE("preserve answer RRs");
+        preserveRRCheck(message_parse, Message::SECTION_ANSWER);
+    }
+}
+
+TEST_F(MessageTest, fromWirePreserveAuthority) {
+    // Same for the previous test, but for the authority section.
+    factoryFromFile(message_parse, "message_fromWire20.wire",
+                    Message::PRESERVE_ORDER);
+    {
+        SCOPED_TRACE("preserve authority RRs");
+        preserveRRCheck(message_parse, Message::SECTION_AUTHORITY);
+    }
+}
+
+TEST_F(MessageTest, fromWirePreserveAdditional) {
+    // Same for the previous test, but for the additional section.
+    factoryFromFile(message_parse, "message_fromWire21.wire",
+                    Message::PRESERVE_ORDER);
+    {
+        SCOPED_TRACE("preserve additional RRs");
+        preserveRRCheck(message_parse, Message::SECTION_ADDITIONAL);
+    }
+}
+
 TEST_F(MessageTest, EDNS0ExtRcode) {
     // Extended Rcode = BADVERS
     factoryFromFile(message_parse, "message_fromWire10.wire");
diff --git a/src/lib/dns/tests/rdata_afsdb_unittest.cc b/src/lib/dns/tests/rdata_afsdb_unittest.cc
new file mode 100644
index 0000000..7df8d83
--- /dev/null
+++ b/src/lib/dns/tests/rdata_afsdb_unittest.cc
@@ -0,0 +1,210 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+const char* const afsdb_text = "1 afsdb.example.com.";
+const char* const afsdb_text2 = "0 root.example.com.";
+const char* const too_long_label("012345678901234567890123456789"
+        "0123456789012345678901234567890123");
+
+namespace {
+class Rdata_AFSDB_Test : public RdataTest {
+protected:
+    Rdata_AFSDB_Test() :
+        rdata_afsdb(string(afsdb_text)), rdata_afsdb2(string(afsdb_text2))
+    {}
+
+    const generic::AFSDB rdata_afsdb;
+    const generic::AFSDB rdata_afsdb2;
+    vector<uint8_t> expected_wire;
+};
+
+
+TEST_F(Rdata_AFSDB_Test, createFromText) {
+    EXPECT_EQ(1, rdata_afsdb.getSubtype());
+    EXPECT_EQ(Name("afsdb.example.com."), rdata_afsdb.getServer());
+
+    EXPECT_EQ(0, rdata_afsdb2.getSubtype());
+    EXPECT_EQ(Name("root.example.com."), rdata_afsdb2.getServer());
+}
+
+TEST_F(Rdata_AFSDB_Test, badText) {
+    // subtype is too large
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("99999999 afsdb.example.com."),
+                 InvalidRdataText);
+    // incomplete text
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("10"), InvalidRdataText);
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("SPOON"), InvalidRdataText);
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("1root.example.com."), InvalidRdataText);
+    // number of fields (must be 2) is incorrect
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("10 afsdb. example.com."),
+                 InvalidRdataText);
+    // bad name
+    EXPECT_THROW(const generic::AFSDB rdata_afsdb("1 afsdb.example.com." +
+                string(too_long_label)), TooLongLabel);
+}
+
+TEST_F(Rdata_AFSDB_Test, assignment) {
+    generic::AFSDB copy((string(afsdb_text2)));
+    copy = rdata_afsdb;
+    EXPECT_EQ(0, copy.compare(rdata_afsdb));
+
+    // Check if the copied data is valid even after the original is deleted
+    generic::AFSDB* copy2 = new generic::AFSDB(rdata_afsdb);
+    generic::AFSDB copy3((string(afsdb_text2)));
+    copy3 = *copy2;
+    delete copy2;
+    EXPECT_EQ(0, copy3.compare(rdata_afsdb));
+
+    // Self assignment
+    copy = copy;
+    EXPECT_EQ(0, copy.compare(rdata_afsdb));
+}
+
+TEST_F(Rdata_AFSDB_Test, createFromWire) {
+    // uncompressed names
+    EXPECT_EQ(0, rdata_afsdb.compare(
+                  *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+                                     "rdata_afsdb_fromWire1.wire")));
+    // compressed name
+    EXPECT_EQ(0, rdata_afsdb.compare(
+                  *rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+                                     "rdata_afsdb_fromWire2.wire", 13)));
+    // RDLENGTH is too short
+    EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+                                     "rdata_afsdb_fromWire3.wire"),
+                 InvalidRdataLength);
+    // RDLENGTH is too long
+    EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+                                      "rdata_afsdb_fromWire4.wire"),
+                 InvalidRdataLength);
+    // bogus server name, the error should be detected in the name
+    // constructor
+    EXPECT_THROW(rdataFactoryFromFile(RRType::AFSDB(), RRClass::IN(),
+                                      "rdata_afsdb_fromWire5.wire"),
+                 DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireBuffer) {
+    // construct actual data
+    rdata_afsdb.toWire(obuffer);
+
+    // construct expected data
+    UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+    // then compare them
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        &expected_wire[0], expected_wire.size());
+
+    // clear buffer for the next test
+    obuffer.clear();
+
+    // construct actual data
+    Name("example.com.").toWire(obuffer);
+    rdata_afsdb2.toWire(obuffer);
+
+    // construct expected data
+    UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+    // then compare them
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toWireRenderer) {
+    // similar to toWireBuffer, but names in RDATA could be compressed due to
+    // preceding names.  Actually they must not be compressed according to
+    // RFC3597, and this test checks that.
+
+    // construct actual data
+    rdata_afsdb.toWire(renderer);
+
+    // construct expected data
+    UnitTestUtil::readWireData("rdata_afsdb_toWire1.wire", expected_wire);
+
+    // then compare them
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        renderer.getData(), renderer.getLength(),
+                        &expected_wire[0], expected_wire.size());
+
+    // clear renderer for the next test
+    renderer.clear();
+
+    // construct actual data
+    Name("example.com.").toWire(obuffer);
+    rdata_afsdb2.toWire(renderer);
+
+    // construct expected data
+    UnitTestUtil::readWireData("rdata_afsdb_toWire2.wire", expected_wire);
+
+    // then compare them
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        renderer.getData(), renderer.getLength(),
+                        &expected_wire[0], expected_wire.size());
+}
+
+TEST_F(Rdata_AFSDB_Test, toText) {
+    EXPECT_EQ(afsdb_text, rdata_afsdb.toText());
+    EXPECT_EQ(afsdb_text2, rdata_afsdb2.toText());
+}
+
+TEST_F(Rdata_AFSDB_Test, compare) {
+    // check reflexivity
+    EXPECT_EQ(0, rdata_afsdb.compare(rdata_afsdb));
+
+    // name must be compared in case-insensitive manner
+    EXPECT_EQ(0, rdata_afsdb.compare(generic::AFSDB("1 "
+                                "AFSDB.example.com.")));
+
+    const generic::AFSDB small1("10 afsdb.example.com");
+    const generic::AFSDB large1("65535 afsdb.example.com");
+    const generic::AFSDB large2("256 afsdb.example.com");
+
+    // confirm these are compared as unsigned values
+    EXPECT_GT(0, rdata_afsdb.compare(large1));
+    EXPECT_LT(0, large1.compare(rdata_afsdb));
+
+    // confirm these are compared in network byte order
+    EXPECT_GT(0, small1.compare(large2));
+    EXPECT_LT(0, large2.compare(small1));
+
+    // another AFSDB whose server name is larger than that of rdata_afsdb.
+    const generic::AFSDB large3("256 zzzzz.example.com");
+    EXPECT_GT(0, large2.compare(large3));
+    EXPECT_LT(0, large3.compare(large2));
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(rdata_afsdb.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_dhcid_unittest.cc b/src/lib/dns/tests/rdata_dhcid_unittest.cc
new file mode 100644
index 0000000..9df7043
--- /dev/null
+++ b/src/lib/dns/tests/rdata_dhcid_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/rdataclass.h>
+#include <util/encode/base64.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::util::encode;
+using namespace isc::dns::rdata;
+
+namespace {
+
+const string string_dhcid(
+                   "0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=");
+
+const in::DHCID rdata_dhcid(string_dhcid);
+
+class Rdata_DHCID_Test : public RdataTest {
+};
+
+TEST_F(Rdata_DHCID_Test, createFromString) {
+    const in::DHCID rdata_dhcid2(string_dhcid);
+    EXPECT_EQ(0, rdata_dhcid2.compare(rdata_dhcid));
+}
+
+TEST_F(Rdata_DHCID_Test, badBase64) {
+    EXPECT_THROW(const in::DHCID rdata_dhcid_bad("00"), isc::BadValue);
+}
+
+TEST_F(Rdata_DHCID_Test, badLength) {
+    EXPECT_THROW(const in::DHCID rdata_dhcid_bad("MDA="), InvalidRdataLength);
+}
+
+TEST_F(Rdata_DHCID_Test, copy) {
+    const in::DHCID rdata_dhcid2(rdata_dhcid);
+    EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid2));
+}
+
+TEST_F(Rdata_DHCID_Test, createFromWire) {
+    EXPECT_EQ(0, rdata_dhcid.compare(
+                  *rdataFactoryFromFile(RRType("DHCID"), RRClass("IN"),
+                                        "rdata_dhcid_fromWire")));
+    // TBD: more tests
+}
+
+TEST_F(Rdata_DHCID_Test, toWireRenderer) {
+    rdata_dhcid.toWire(renderer);
+
+    vector<unsigned char> data;
+    UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, renderer.getData(),
+                        renderer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toWireBuffer) {
+    rdata_dhcid.toWire(obuffer);
+
+    vector<unsigned char> data;
+    UnitTestUtil::readWireData("rdata_dhcid_toWire", data);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+                        obuffer.getLength(), &data[0], data.size());
+}
+
+TEST_F(Rdata_DHCID_Test, toText) {
+    EXPECT_EQ(string_dhcid, rdata_dhcid.toText());
+}
+
+TEST_F(Rdata_DHCID_Test, getDHCIDDigest) {
+    const string string_dhcid1(encodeBase64(rdata_dhcid.getDigest()));
+
+    EXPECT_EQ(string_dhcid, string_dhcid1);
+}
+
+TEST_F(Rdata_DHCID_Test, compare) {
+    // trivial case: self equivalence
+    EXPECT_EQ(0, rdata_dhcid.compare(rdata_dhcid));
+
+    in::DHCID rdata_dhcid1("0YLQvtC/0L7Qu9GPINC00LLQsCDRgNGD0LHQu9GP");
+    in::DHCID rdata_dhcid2("0YLQvtC/0L7Qu9GPINGC0YDQuCDRgNGD0LHQu9GP");
+    in::DHCID rdata_dhcid3("0YLQvtC/0L7Qu9GPINGH0LXRgtGL0YDQtSDRgNGD0LHQu9GP");
+
+    EXPECT_LT(rdata_dhcid1.compare(rdata_dhcid2), 0);
+    EXPECT_GT(rdata_dhcid2.compare(rdata_dhcid1), 0);
+
+    EXPECT_LT(rdata_dhcid2.compare(rdata_dhcid3), 0);
+    EXPECT_GT(rdata_dhcid3.compare(rdata_dhcid2), 0);
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(rdata_dhcid.compare(*rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_ds_like_unittest.cc b/src/lib/dns/tests/rdata_ds_like_unittest.cc
new file mode 100644
index 0000000..9b29446
--- /dev/null
+++ b/src/lib/dns/tests/rdata_ds_like_unittest.cc
@@ -0,0 +1,171 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <algorithm>
+#include <string>
+
+#include <util/buffer.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+// hacks to make templates work
+template <class T>
+class RRTYPE : public RRType {
+public:
+    RRTYPE();
+};
+
+template<> RRTYPE<generic::DS>::RRTYPE() : RRType(RRType::DS()) {}
+template<> RRTYPE<generic::DLV>::RRTYPE() : RRType(RRType::DLV()) {}
+
+template <class DS_LIKE>
+class Rdata_DS_LIKE_Test : public RdataTest {
+protected:
+    static DS_LIKE const rdata_ds_like;
+};
+
+string ds_like_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+
+template <class DS_LIKE>
+DS_LIKE const Rdata_DS_LIKE_Test<DS_LIKE>::rdata_ds_like(ds_like_txt);
+
+// The list of types we want to test.
+typedef testing::Types<generic::DS, generic::DLV> Implementations;
+
+TYPED_TEST_CASE(Rdata_DS_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toText_DS_LIKE) {
+    EXPECT_EQ(ds_like_txt, this->rdata_ds_like.toText());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, badText_DS_LIKE) {
+    EXPECT_THROW(const TypeParam ds_like2("99999 5 2 BEEF"), InvalidRdataText);
+    EXPECT_THROW(const TypeParam ds_like2("11111 555 2 BEEF"),
+                 InvalidRdataText);
+    EXPECT_THROW(const TypeParam ds_like2("11111 5 22222 BEEF"),
+                 InvalidRdataText);
+    EXPECT_THROW(const TypeParam ds_like2("11111 5 2"), InvalidRdataText);
+    EXPECT_THROW(const TypeParam ds_like2("GARBAGE IN"), InvalidRdataText);
+    // no space between the digest type and the digest.
+    EXPECT_THROW(const TypeParam ds_like2(
+                     "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                     "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, createFromWire_DS_LIKE) {
+    EXPECT_EQ(0, this->rdata_ds_like.compare(
+              *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass::IN(),
+                                          "rdata_ds_fromWire")));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, assignment_DS_LIKE) {
+    TypeParam copy((string(ds_like_txt)));
+    copy = this->rdata_ds_like;
+    EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+
+    // Check if the copied data is valid even after the original is deleted
+    TypeParam* copy2 = new TypeParam(this->rdata_ds_like);
+    TypeParam copy3((string(ds_like_txt)));
+    copy3 = *copy2;
+    delete copy2;
+    EXPECT_EQ(0, copy3.compare(this->rdata_ds_like));
+
+    // Self assignment
+    copy = copy;
+    EXPECT_EQ(0, copy.compare(this->rdata_ds_like));
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, getTag_DS_LIKE) {
+    EXPECT_EQ(12892, this->rdata_ds_like.getTag());
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireRenderer) {
+    Rdata_DS_LIKE_Test<TypeParam>::renderer.skip(2);
+    TypeParam rdata_ds_like(ds_like_txt);
+    rdata_ds_like.toWire(this->renderer);
+
+    vector<unsigned char> data;
+    UnitTestUtil::readWireData("rdata_ds_fromWire", data);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        static_cast<const uint8_t*>
+                        (this->obuffer.getData()) + 2,
+                        this->obuffer.getLength() - 2,
+                        &data[2], data.size() - 2);
+}
+
+TYPED_TEST(Rdata_DS_LIKE_Test, toWireBuffer) {
+    TypeParam rdata_ds_like(ds_like_txt);
+    rdata_ds_like.toWire(this->obuffer);
+}
+
+string ds_like_txt1("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+// different tag
+string ds_like_txt2("12893 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+// different algorithm
+string ds_like_txt3("12892 6 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+// different digest type
+string ds_like_txt4("12892 5 3 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+// different digest
+string ds_like_txt5("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B5");
+// different digest length
+string ds_like_txt6("12892 5 2 F2E184C0E1D615D20EB3C223ACED3B03C773DD952D"
+                   "5F0EB5C777586DE18DA6B555");
+
+TYPED_TEST(Rdata_DS_LIKE_Test, compare) {
+    // trivial case: self equivalence
+    EXPECT_EQ(0, TypeParam(ds_like_txt).compare(TypeParam(ds_like_txt)));
+
+    // non-equivalence tests
+    EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt2)), 0);
+    EXPECT_GT(TypeParam(ds_like_txt2).compare(TypeParam(ds_like_txt1)), 0);
+
+    EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt3)), 0);
+    EXPECT_GT(TypeParam(ds_like_txt3).compare(TypeParam(ds_like_txt1)), 0);
+
+    EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt4)), 0);
+    EXPECT_GT(TypeParam(ds_like_txt4).compare(TypeParam(ds_like_txt1)), 0);
+
+    EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt5)), 0);
+    EXPECT_GT(TypeParam(ds_like_txt5).compare(TypeParam(ds_like_txt1)), 0);
+
+    EXPECT_LT(TypeParam(ds_like_txt1).compare(TypeParam(ds_like_txt6)), 0);
+    EXPECT_GT(TypeParam(ds_like_txt6).compare(TypeParam(ds_like_txt1)), 0);
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(this->rdata_ds_like.compare(*this->rdata_nomatch),
+                 bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_ds_unittest.cc b/src/lib/dns/tests/rdata_ds_unittest.cc
deleted file mode 100644
index 5988620..0000000
--- a/src/lib/dns/tests/rdata_ds_unittest.cc
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <string>
-
-#include <util/buffer.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-class Rdata_DS_Test : public RdataTest {
-    // there's nothing to specialize
-};
-
-string ds_txt("12892 5 2 F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
-              "5F0EB5C777586DE18DA6B5");
-const generic::DS rdata_ds(ds_txt);
-
-TEST_F(Rdata_DS_Test, toText_DS) {
-    EXPECT_EQ(ds_txt, rdata_ds.toText());
-}
-
-TEST_F(Rdata_DS_Test, badText_DS) {
-    EXPECT_THROW(const generic::DS ds2("99999 5 2 BEEF"), InvalidRdataText);
-    EXPECT_THROW(const generic::DS ds2("11111 555 2 BEEF"), InvalidRdataText);
-    EXPECT_THROW(const generic::DS ds2("11111 5 22222 BEEF"), InvalidRdataText);
-    EXPECT_THROW(const generic::DS ds2("11111 5 2"), InvalidRdataText);
-    EXPECT_THROW(const generic::DS ds2("GARBAGE IN"), InvalidRdataText);
-}
-
-// this test currently fails; we must fix it, and then migrate the test to
-// badText_DS
-TEST_F(Rdata_DS_Test, DISABLED_badText_DS) {
-    // no space between the digest type and the digest.
-    EXPECT_THROW(const generic::DS ds2(
-                     "12892 5 2F1E184C0E1D615D20EB3C223ACED3B03C773DD952D"
-                     "5F0EB5C777586DE18DA6B5"), InvalidRdataText);
-}
-
-TEST_F(Rdata_DS_Test, createFromWire_DS) {
-    EXPECT_EQ(0, rdata_ds.compare(
-                  *rdataFactoryFromFile(RRType::DS(), RRClass::IN(),
-                                        "rdata_ds_fromWire")));
-}
-
-TEST_F(Rdata_DS_Test, getTag_DS) {
-    EXPECT_EQ(12892, rdata_ds.getTag());
-}
-
-TEST_F(Rdata_DS_Test, toWireRenderer) {
-    renderer.skip(2);
-    generic::DS rdata_ds(ds_txt);
-    rdata_ds.toWire(renderer);
-
-    vector<unsigned char> data;
-    UnitTestUtil::readWireData("rdata_ds_fromWire", data);
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        static_cast<const uint8_t *>(obuffer.getData()) + 2,
-                        obuffer.getLength() - 2, &data[2], data.size() - 2);
-}
-
-TEST_F(Rdata_DS_Test, toWireBuffer) {
-    generic::DS rdata_ds(ds_txt);
-    rdata_ds.toWire(obuffer);
-}
-
-TEST_F(Rdata_DS_Test, compare) {
-    // trivial case: self equivalence
-    EXPECT_EQ(0, generic::DS(ds_txt).compare(generic::DS(ds_txt)));
-
-    // TODO: need more tests
-}
-
-}
diff --git a/src/lib/dns/tests/rdata_hinfo_unittest.cc b/src/lib/dns/tests/rdata_hinfo_unittest.cc
new file mode 100644
index 0000000..c52b2a0
--- /dev/null
+++ b/src/lib/dns/tests/rdata_hinfo_unittest.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_HINFO_Test : public RdataTest {
+};
+
+static uint8_t hinfo_rdata[] = {0x07,0x50,0x65,0x6e,0x74,0x69,0x75,0x6d,0x05,
+    0x4c,0x69,0x6e,0x75,0x78};
+static const char *hinfo_str = "\"Pentium\" \"Linux\"";
+static const char *hinfo_str1 = "\"Pen\\\"tium\" \"Linux\"";
+
+static const char *hinfo_str_small1 = "\"Lentium\" \"Linux\"";
+static const char *hinfo_str_small2 = "\"Pentium\" \"Kinux\"";
+static const char *hinfo_str_large1 = "\"Qentium\" \"Linux\"";
+static const char *hinfo_str_large2 = "\"Pentium\" \"UNIX\"";
+
+TEST_F(Rdata_HINFO_Test, createFromText) {
+    HINFO hinfo(hinfo_str);
+    EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+    EXPECT_EQ(string("Linux"), hinfo.getOS());
+
+    // Test the text with double quotes in the middle of string
+    HINFO hinfo1(hinfo_str1);
+    EXPECT_EQ(string("Pen\"tium"), hinfo1.getCPU());
+}
+
+TEST_F(Rdata_HINFO_Test, badText) {
+    // Fields must be separated by spaces
+    EXPECT_THROW(const HINFO hinfo("\"Pentium\"\"Linux\""), InvalidRdataText);
+    // Field cannot be missing
+    EXPECT_THROW(const HINFO hinfo("Pentium"), InvalidRdataText);
+    // The <character-string> cannot exceed 255 characters
+    string hinfo_str;
+    for (int i = 0; i < 257; ++i) {
+        hinfo_str += 'A';
+    }
+    hinfo_str += " Linux";
+    EXPECT_THROW(const HINFO hinfo(hinfo_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_HINFO_Test, createFromWire) {
+    InputBuffer input_buffer(hinfo_rdata, sizeof(hinfo_rdata));
+    HINFO hinfo(input_buffer, sizeof(hinfo_rdata));
+    EXPECT_EQ(string("Pentium"), hinfo.getCPU());
+    EXPECT_EQ(string("Linux"), hinfo.getOS());
+}
+
+TEST_F(Rdata_HINFO_Test, toText) {
+    HINFO hinfo(hinfo_str);
+    EXPECT_EQ(hinfo_str, hinfo.toText());
+}
+
+TEST_F(Rdata_HINFO_Test, toWire) {
+    HINFO hinfo(hinfo_str);
+    hinfo.toWire(obuffer);
+
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+                        obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, toWireRenderer) {
+    HINFO hinfo(hinfo_str);
+
+    hinfo.toWire(renderer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+                        obuffer.getLength(), hinfo_rdata, sizeof(hinfo_rdata));
+}
+
+TEST_F(Rdata_HINFO_Test, compare) {
+    HINFO hinfo(hinfo_str);
+    HINFO hinfo_small1(hinfo_str_small1);
+    HINFO hinfo_small2(hinfo_str_small2);
+    HINFO hinfo_large1(hinfo_str_large1);
+    HINFO hinfo_large2(hinfo_str_large2);
+
+    EXPECT_EQ(0, hinfo.compare(HINFO(hinfo_str)));
+    EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small1)));
+    EXPECT_EQ(1, hinfo.compare(HINFO(hinfo_str_small2)));
+    EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large1)));
+    EXPECT_EQ(-1, hinfo.compare(HINFO(hinfo_str_large2)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_minfo_unittest.cc b/src/lib/dns/tests/rdata_minfo_unittest.cc
new file mode 100644
index 0000000..30c7c39
--- /dev/null
+++ b/src/lib/dns/tests/rdata_minfo_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+// minfo text
+const char* const minfo_txt = "rmailbox.example.com. emailbox.example.com.";
+const char* const minfo_txt2 = "root.example.com. emailbox.example.com.";
+const char* const too_long_label = "01234567890123456789012345678901234567"
+                                   "89012345678901234567890123";
+
+namespace {
+class Rdata_MINFO_Test : public RdataTest {
+public:
+    Rdata_MINFO_Test():
+        rdata_minfo(string(minfo_txt)), rdata_minfo2(string(minfo_txt2)) {}
+
+    const generic::MINFO rdata_minfo;
+    const generic::MINFO rdata_minfo2;
+};
+
+
+TEST_F(Rdata_MINFO_Test, createFromText) {
+    EXPECT_EQ(Name("rmailbox.example.com."), rdata_minfo.getRmailbox());
+    EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo.getEmailbox());
+
+    EXPECT_EQ(Name("root.example.com."), rdata_minfo2.getRmailbox());
+    EXPECT_EQ(Name("emailbox.example.com."), rdata_minfo2.getEmailbox());
+}
+
+TEST_F(Rdata_MINFO_Test, badText) {
+    // incomplete text
+    EXPECT_THROW(generic::MINFO("root.example.com."),
+                 InvalidRdataText);
+    // number of fields (must be 2) is incorrect
+    EXPECT_THROW(generic::MINFO("root.example.com emailbox.example.com. "
+                                "example.com."),
+                 InvalidRdataText);
+    // bad rmailbox name
+    EXPECT_THROW(generic::MINFO("root.example.com. emailbox.example.com." +
+                                string(too_long_label)),
+                 TooLongLabel);
+    // bad emailbox name
+    EXPECT_THROW(generic::MINFO("root.example.com."  +
+                          string(too_long_label) + " emailbox.example.com."),
+                 TooLongLabel);
+}
+
+TEST_F(Rdata_MINFO_Test, createFromWire) {
+    // uncompressed names
+    EXPECT_EQ(0, rdata_minfo.compare(
+                  *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                     "rdata_minfo_fromWire1.wire")));
+    // compressed names
+    EXPECT_EQ(0, rdata_minfo.compare(
+                  *rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                     "rdata_minfo_fromWire2.wire", 15)));
+    // RDLENGTH is too short
+    EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                     "rdata_minfo_fromWire3.wire"),
+                 InvalidRdataLength);
+    // RDLENGTH is too long
+    EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                      "rdata_minfo_fromWire4.wire"),
+                 InvalidRdataLength);
+    // bogus rmailbox name, the error should be detected in the name
+    // constructor
+    EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                      "rdata_minfo_fromWire5.wire"),
+                 DNSMessageFORMERR);
+    // bogus emailbox name, the error should be detected in the name
+    // constructor
+    EXPECT_THROW(rdataFactoryFromFile(RRType::MINFO(), RRClass::IN(),
+                                      "rdata_minfo_fromWire6.wire"),
+                 DNSMessageFORMERR);
+}
+
+TEST_F(Rdata_MINFO_Test, assignment) {
+    generic::MINFO copy((string(minfo_txt2)));
+    copy = rdata_minfo;
+    EXPECT_EQ(0, copy.compare(rdata_minfo));
+
+    // Check if the copied data is valid even after the original is deleted
+    generic::MINFO* copy2 = new generic::MINFO(rdata_minfo);
+    generic::MINFO copy3((string(minfo_txt2)));
+    copy3 = *copy2;
+    delete copy2;
+    EXPECT_EQ(0, copy3.compare(rdata_minfo));
+
+    // Self assignment
+    copy = copy;
+    EXPECT_EQ(0, copy.compare(rdata_minfo));
+}
+
+TEST_F(Rdata_MINFO_Test, toWireBuffer) {
+    rdata_minfo.toWire(obuffer);
+    vector<unsigned char> data;
+    UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed1.wire", data);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        static_cast<const uint8_t *>(obuffer.getData()),
+                        obuffer.getLength(), &data[0], data.size());
+
+    obuffer.clear();
+    rdata_minfo2.toWire(obuffer);
+    vector<unsigned char> data2;
+    UnitTestUtil::readWireData("rdata_minfo_toWireUncompressed2.wire", data2);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        static_cast<const uint8_t *>(obuffer.getData()),
+                        obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toWireRenderer) {
+    rdata_minfo.toWire(renderer);
+    vector<unsigned char> data;
+    UnitTestUtil::readWireData("rdata_minfo_toWire1.wire", data);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        static_cast<const uint8_t *>(obuffer.getData()),
+                        obuffer.getLength(), &data[0], data.size());
+    renderer.clear();
+    rdata_minfo2.toWire(renderer);
+    vector<unsigned char> data2;
+    UnitTestUtil::readWireData("rdata_minfo_toWire2.wire", data2);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        static_cast<const uint8_t *>(obuffer.getData()),
+                        obuffer.getLength(), &data2[0], data2.size());
+}
+
+TEST_F(Rdata_MINFO_Test, toText) {
+    EXPECT_EQ(minfo_txt, rdata_minfo.toText());
+    EXPECT_EQ(minfo_txt2, rdata_minfo2.toText());
+}
+
+TEST_F(Rdata_MINFO_Test, compare) {
+    // check reflexivity
+    EXPECT_EQ(0, rdata_minfo.compare(rdata_minfo));
+
+    // names must be compared in case-insensitive manner
+    EXPECT_EQ(0, rdata_minfo.compare(generic::MINFO("RMAILBOX.example.com. "
+                                                  "emailbox.EXAMPLE.com.")));
+
+    // another MINFO whose rmailbox name is larger than that of rdata_minfo.
+    const generic::MINFO large1_minfo("zzzzzzzz.example.com. "
+                                      "emailbox.example.com.");
+    EXPECT_GT(0, rdata_minfo.compare(large1_minfo));
+    EXPECT_LT(0, large1_minfo.compare(rdata_minfo));
+
+    // another MINFO whose emailbox name is larger than that of rdata_minfo.
+    const generic::MINFO large2_minfo("rmailbox.example.com. "
+                                      "zzzzzzzzzzz.example.com.");
+    EXPECT_GT(0, rdata_minfo.compare(large2_minfo));
+    EXPECT_LT(0, large2_minfo.compare(rdata_minfo));
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(rdata_minfo.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_naptr_unittest.cc b/src/lib/dns/tests/rdata_naptr_unittest.cc
new file mode 100644
index 0000000..f905943
--- /dev/null
+++ b/src/lib/dns/tests/rdata_naptr_unittest.cc
@@ -0,0 +1,178 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+using namespace isc::dns::rdata::generic;
+
+namespace {
+class Rdata_NAPTR_Test : public RdataTest {
+};
+
+// 10 100 "S" "SIP+D2U" "" _sip._udp.example.com.
+static uint8_t naptr_rdata[] = {0x00,0x0a,0x00,0x64,0x01,0x53,0x07,0x53,0x49,
+    0x50,0x2b,0x44,0x32,0x55,0x00,0x04,0x5f,0x73,0x69,0x70,0x04,0x5f,0x75,0x64,
+    0x70,0x07,0x65,0x78,0x61,0x6d,0x70,0x6c,0x65,0x03,0x63,0x6f,0x6d,0x00};
+
+static const char *naptr_str =
+    "10 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str2 =
+    "10 100 S SIP+D2U \"\" _sip._udp.example.com.";
+
+static const char *naptr_str_small1 =
+    "9 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small2 =
+    "10 90 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small3 =
+    "10 100 \"R\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small4 =
+    "10 100 \"S\" \"SIP+C2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_small5 =
+    "10 100 \"S\" \"SIP+D2U\" \"\" _rip._udp.example.com.";
+
+static const char *naptr_str_large1 =
+    "11 100 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large2 =
+    "10 110 \"S\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large3 =
+    "10 100 \"T\" \"SIP+D2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large4 =
+    "10 100 \"S\" \"SIP+E2U\" \"\" _sip._udp.example.com.";
+static const char *naptr_str_large5 =
+    "10 100 \"S\" \"SIP+D2U\" \"\" _tip._udp.example.com.";
+
+TEST_F(Rdata_NAPTR_Test, createFromText) {
+    NAPTR naptr(naptr_str);
+    EXPECT_EQ(10, naptr.getOrder());
+    EXPECT_EQ(100, naptr.getPreference());
+    EXPECT_EQ(string("S"), naptr.getFlags());
+    EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+    EXPECT_EQ(string(""), naptr.getRegexp());
+    EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+
+    // Test <character-string> fields separated by a space
+    NAPTR naptr2(naptr_str2);
+    EXPECT_EQ(string("S"), naptr2.getFlags());
+    EXPECT_EQ(string("SIP+D2U"), naptr2.getServices());
+}
+
+TEST_F(Rdata_NAPTR_Test, badText) {
+    // Order number cannot exceed 65535
+    EXPECT_THROW(const NAPTR naptr("65536 10 S SIP \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    // Preference number cannot exceed 65535
+    EXPECT_THROW(const NAPTR naptr("100 65536 S SIP \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    // No regexp given
+    EXPECT_THROW(const NAPTR naptr("100 10 S SIP _sip._udp.example.com."),
+                 InvalidRdataText);
+    // The double-quote separators must match
+    EXPECT_THROW(const NAPTR naptr("100 10 \"S SIP \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    // Order or preference field cannot be missing
+    EXPECT_THROW(const NAPTR naptr("10 \"S\" SIP \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    // Fields must be separated by spaces
+    EXPECT_THROW(const NAPTR naptr("100 10S SIP \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    EXPECT_THROW(const NAPTR naptr("100 10 \"S\"\"SIP\" \"\" _sip._udp.example.com."),
+                 InvalidRdataText);
+    // Field cannot be missing
+    EXPECT_THROW(const NAPTR naptr("100 10 \"S\""), InvalidRdataText);
+
+    // The <character-string> cannot exceed 255 characters
+    string naptr_str;
+    naptr_str += "100 10 ";
+    for (int i = 0; i < 257; ++i) {
+        naptr_str += 'A';
+    }
+    naptr_str += " SIP \"\" _sip._udp.example.com.";
+    EXPECT_THROW(const NAPTR naptr(naptr_str), CharStringTooLong);
+}
+
+TEST_F(Rdata_NAPTR_Test, createFromWire) {
+    InputBuffer input_buffer(naptr_rdata, sizeof(naptr_rdata));
+    NAPTR naptr(input_buffer, sizeof(naptr_rdata));
+    EXPECT_EQ(10, naptr.getOrder());
+    EXPECT_EQ(100, naptr.getPreference());
+    EXPECT_EQ(string("S"), naptr.getFlags());
+    EXPECT_EQ(string("SIP+D2U"), naptr.getServices());
+    EXPECT_EQ(string(""), naptr.getRegexp());
+    EXPECT_EQ(Name("_sip._udp.example.com."), naptr.getReplacement());
+}
+
+TEST_F(Rdata_NAPTR_Test, toWire) {
+    NAPTR naptr(naptr_str);
+    naptr.toWire(obuffer);
+
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+                        obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toWireRenderer) {
+    NAPTR naptr(naptr_str);
+
+    naptr.toWire(renderer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData, obuffer.getData(),
+                        obuffer.getLength(), naptr_rdata, sizeof(naptr_rdata));
+}
+
+TEST_F(Rdata_NAPTR_Test, toText) {
+    NAPTR naptr(naptr_str);
+    EXPECT_EQ(naptr_str, naptr.toText());
+}
+
+TEST_F(Rdata_NAPTR_Test, compare) {
+    NAPTR naptr(naptr_str);
+    NAPTR naptr_small1(naptr_str_small1);
+    NAPTR naptr_small2(naptr_str_small2);
+    NAPTR naptr_small3(naptr_str_small3);
+    NAPTR naptr_small4(naptr_str_small4);
+    NAPTR naptr_small5(naptr_str_small5);
+    NAPTR naptr_large1(naptr_str_large1);
+    NAPTR naptr_large2(naptr_str_large2);
+    NAPTR naptr_large3(naptr_str_large3);
+    NAPTR naptr_large4(naptr_str_large4);
+    NAPTR naptr_large5(naptr_str_large5);
+
+    EXPECT_EQ(0, naptr.compare(NAPTR(naptr_str)));
+    EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small1)));
+    EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small2)));
+    EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small3)));
+    EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small4)));
+    EXPECT_EQ(1, naptr.compare(NAPTR(naptr_str_small5)));
+    EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large1)));
+    EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large2)));
+    EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large3)));
+    EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large4)));
+    EXPECT_EQ(-1, naptr.compare(NAPTR(naptr_str_large5)));
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_nsec_unittest.cc b/src/lib/dns/tests/rdata_nsec_unittest.cc
index 5aa1e9c..f081cd8 100644
--- a/src/lib/dns/tests/rdata_nsec_unittest.cc
+++ b/src/lib/dns/tests/rdata_nsec_unittest.cc
@@ -89,4 +89,10 @@ TEST_F(Rdata_NSEC_Test, assign) {
     EXPECT_EQ(0, rdata_nsec.compare(rdata_nsec2));
 }
 
+TEST_F(Rdata_NSEC_Test, getNextName) {
+    // The implementation is quite trivial, so we simply check it's actually
+    // defined and does work as intended in a simple case.
+    EXPECT_EQ(Name("www2.isc.org"), generic::NSEC((nsec_txt)).getNextName());
+}
+
 }
diff --git a/src/lib/dns/tests/rdata_rrsig_unittest.cc b/src/lib/dns/tests/rdata_rrsig_unittest.cc
index 903021f..3324b99 100644
--- a/src/lib/dns/tests/rdata_rrsig_unittest.cc
+++ b/src/lib/dns/tests/rdata_rrsig_unittest.cc
@@ -47,7 +47,7 @@ TEST_F(Rdata_RRSIG_Test, fromText) {
                      "f49t+sXKPzbipN9g+s1ZPiIyofc=");
     generic::RRSIG rdata_rrsig(rrsig_txt);
     EXPECT_EQ(rrsig_txt, rdata_rrsig.toText());
-
+    EXPECT_EQ(isc::dns::RRType::A(), rdata_rrsig.typeCovered());
 }
 
 TEST_F(Rdata_RRSIG_Test, badText) {
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 63fe1f7..07c24d5 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -74,4 +74,9 @@ TEST_F(Rdata_SOA_Test, toText) {
     EXPECT_EQ("ns.example.com. root.example.com. "
               "2010012601 3600 300 3600000 1200", rdata_soa.toText());
 }
+
+TEST_F(Rdata_SOA_Test, getSerial) {
+    EXPECT_EQ(2010012601, rdata_soa.getSerial().getValue());
+}
+
 }
diff --git a/src/lib/dns/tests/rdata_srv_unittest.cc b/src/lib/dns/tests/rdata_srv_unittest.cc
new file mode 100644
index 0000000..3394f43
--- /dev/null
+++ b/src/lib/dns/tests/rdata_srv_unittest.cc
@@ -0,0 +1,173 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/messagerenderer.h>
+#include <dns/rdata.h>
+#include <dns/rdataclass.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+namespace {
+class Rdata_SRV_Test : public RdataTest {
+    // there's nothing to specialize
+};
+
+string srv_txt("1 5 1500 a.example.com.");
+string srv_txt2("1 5 1400 example.com.");
+string too_long_label("012345678901234567890123456789"
+    "0123456789012345678901234567890123");
+
+// 1 5 1500 a.example.com.
+const uint8_t wiredata_srv[] = {
+    0x00, 0x01, 0x00, 0x05, 0x05, 0xdc, 0x01, 0x61, 0x07, 0x65, 0x78,
+    0x61, 0x6d, 0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+// 1 5 1400 example.com.
+const uint8_t wiredata_srv2[] = {
+    0x00, 0x01, 0x00, 0x05, 0x05, 0x78, 0x07, 0x65, 0x78, 0x61, 0x6d,
+    0x70, 0x6c, 0x65, 0x03, 0x63, 0x6f, 0x6d, 0x00};
+
+const in::SRV rdata_srv(srv_txt);
+const in::SRV rdata_srv2(srv_txt2);
+
+TEST_F(Rdata_SRV_Test, createFromText) {
+    EXPECT_EQ(1, rdata_srv.getPriority());
+    EXPECT_EQ(5, rdata_srv.getWeight());
+    EXPECT_EQ(1500, rdata_srv.getPort());
+    EXPECT_EQ(Name("a.example.com."), rdata_srv.getTarget());
+}
+
+TEST_F(Rdata_SRV_Test, badText) {
+    // priority is too large (2814...6 is 2^48)
+    EXPECT_THROW(in::SRV("281474976710656 5 1500 a.example.com."),
+                 InvalidRdataText);
+    // weight is too large
+    EXPECT_THROW(in::SRV("1 281474976710656 1500 a.example.com."),
+                 InvalidRdataText);
+    // port is too large
+    EXPECT_THROW(in::SRV("1 5 281474976710656 a.example.com."),
+                 InvalidRdataText);
+    // incomplete text
+    EXPECT_THROW(in::SRV("1 5 a.example.com."),
+                 InvalidRdataText);
+    EXPECT_THROW(in::SRV("1 5 1500a.example.com."),
+                 InvalidRdataText);
+    // bad name
+    EXPECT_THROW(in::SRV("1 5 1500 a.example.com." + too_long_label),
+                 TooLongLabel);
+}
+
+TEST_F(Rdata_SRV_Test, assignment) {
+    in::SRV copy((string(srv_txt2)));
+    copy = rdata_srv;
+    EXPECT_EQ(0, copy.compare(rdata_srv));
+
+    // Check if the copied data is valid even after the original is deleted
+    in::SRV* copy2 = new in::SRV(rdata_srv);
+    in::SRV copy3((string(srv_txt2)));
+    copy3 = *copy2;
+    delete copy2;
+    EXPECT_EQ(0, copy3.compare(rdata_srv));
+
+    // Self assignment
+    copy = copy;
+    EXPECT_EQ(0, copy.compare(rdata_srv));
+}
+
+TEST_F(Rdata_SRV_Test, createFromWire) {
+    EXPECT_EQ(0, rdata_srv.compare(
+                  *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+                                        "rdata_srv_fromWire")));
+    // RDLENGTH is too short
+    EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+                                      "rdata_srv_fromWire", 23),
+                 InvalidRdataLength);
+    // RDLENGTH is too long
+    EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+                                      "rdata_srv_fromWire", 46),
+                 InvalidRdataLength);
+    // incomplete name.  the error should be detected in the name constructor
+    EXPECT_THROW(rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+                                      "rdata_cname_fromWire", 69),
+                 DNSMessageFORMERR);
+    // parse compressed target name
+    EXPECT_EQ(0, rdata_srv.compare(
+                  *rdataFactoryFromFile(RRType("SRV"), RRClass("IN"),
+                                      "rdata_srv_fromWire", 89)));
+}
+
+TEST_F(Rdata_SRV_Test, toWireBuffer) {
+    rdata_srv.toWire(obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        wiredata_srv, sizeof(wiredata_srv));
+    obuffer.clear();
+    rdata_srv2.toWire(obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toWireRenderer) {
+    rdata_srv.toWire(renderer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        wiredata_srv, sizeof(wiredata_srv));
+    renderer.clear();
+    rdata_srv2.toWire(renderer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        obuffer.getData(), obuffer.getLength(),
+                        wiredata_srv2, sizeof(wiredata_srv2));
+}
+
+TEST_F(Rdata_SRV_Test, toText) {
+    EXPECT_EQ(srv_txt, rdata_srv.toText());
+    EXPECT_EQ(srv_txt2, rdata_srv2.toText());
+}
+
+TEST_F(Rdata_SRV_Test, compare) {
+    // test RDATAs, sorted in ascending order.
+    vector<in::SRV> compare_set;
+    compare_set.push_back(in::SRV("1 5 1500 a.example.com."));
+    compare_set.push_back(in::SRV("2 5 1500 a.example.com."));
+    compare_set.push_back(in::SRV("2 6 1500 a.example.com."));
+    compare_set.push_back(in::SRV("2 6 1600 a.example.com."));
+    compare_set.push_back(in::SRV("2 6 1600 example.com."));
+
+    EXPECT_EQ(0, compare_set[0].compare(
+                  in::SRV("1 5 1500 a.example.com.")));
+
+    vector<in::SRV>::const_iterator it;
+    vector<in::SRV>::const_iterator it_end = compare_set.end();
+    for (it = compare_set.begin(); it != it_end - 1; ++it) {
+        EXPECT_GT(0, (*it).compare(*(it + 1)));
+        EXPECT_LT(0, (*(it + 1)).compare(*it));
+    }
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(rdata_srv.compare(*RdataTest::rdata_nomatch), bad_cast);
+}
+}
diff --git a/src/lib/dns/tests/rdata_txt_like_unittest.cc b/src/lib/dns/tests/rdata_txt_like_unittest.cc
new file mode 100644
index 0000000..981265e
--- /dev/null
+++ b/src/lib/dns/tests/rdata_txt_like_unittest.cc
@@ -0,0 +1,261 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// This is the common code for TXT and SPF tests.
+
+#include <util/buffer.h>
+#include <dns/exceptions.h>
+#include <dns/rdataclass.h>
+#include <gtest/gtest.h>
+
+#include <dns/tests/unittest_util.h>
+#include <dns/tests/rdata_unittest.h>
+
+using isc::UnitTestUtil;
+using namespace std;
+using namespace isc::dns;
+using namespace isc::util;
+using namespace isc::dns::rdata;
+
+
+template<class T>
+class RRTYPE : public RRType {
+public:
+    RRTYPE();
+};
+
+template<> RRTYPE<generic::TXT>::RRTYPE() : RRType(RRType::TXT()) {}
+template<> RRTYPE<generic::SPF>::RRTYPE() : RRType(RRType::SPF()) {}
+
+namespace {
+const uint8_t wiredata_txt_like[] = {
+    sizeof("Test String") - 1,
+    'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
+};
+
+const uint8_t wiredata_nulltxt[] = { 0 };
+vector<uint8_t> wiredata_longesttxt(256, 'a');
+
+template<class TXT_LIKE>
+class Rdata_TXT_LIKE_Test : public RdataTest {
+protected:
+    Rdata_TXT_LIKE_Test() {
+        wiredata_longesttxt[0] = 255; // adjust length
+    }
+
+    static const TXT_LIKE rdata_txt_like;
+    static const TXT_LIKE rdata_txt_like_empty;
+    static const TXT_LIKE rdata_txt_like_quoted;
+};
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like("Test String");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_empty("");
+
+template<class TXT_LIKE>
+const TXT_LIKE Rdata_TXT_LIKE_Test<TXT_LIKE>::rdata_txt_like_quoted
+                                                          ("\"Test String\"");
+
+// The list of types we want to test.
+typedef testing::Types<generic::TXT, generic::SPF> Implementations;
+
+TYPED_TEST_CASE(Rdata_TXT_LIKE_Test, Implementations);
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromText) {
+    // normal case is covered in toWireBuffer.
+
+    // surrounding double-quotes shouldn't change the result.
+    EXPECT_EQ(0, this->rdata_txt_like.compare(this->rdata_txt_like_quoted));
+
+    // Null character-string.
+    this->obuffer.clear();
+    TypeParam(string("")).toWire(this->obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->obuffer.getData(),
+                        this->obuffer.getLength(),
+                        wiredata_nulltxt, sizeof(wiredata_nulltxt));
+
+    // Longest possible character-string.
+    this->obuffer.clear();
+    TypeParam(string(255, 'a')).toWire(this->obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->obuffer.getData(),
+                        this->obuffer.getLength(),
+                        &wiredata_longesttxt[0], wiredata_longesttxt.size());
+
+    // Too long text for a valid character-string.
+    EXPECT_THROW(TypeParam(string(256, 'a')), CharStringTooLong);
+
+    // The escape character makes the double quote a part of character-string,
+    // so this is invalid input and should be rejected.
+    EXPECT_THROW(TypeParam("\"Test String\\\""), InvalidRdataText);
+
+    // Terminating double-quote is provided, so this is valid, but in this
+    // version of implementation we reject escaped characters.
+    EXPECT_THROW(TypeParam("\"Test String\\\"\""), InvalidRdataText);
+}
+
+void
+makeLargest(vector<uint8_t>& data) {
+    uint8_t ch = 0;
+
+    // create 255 sets of character-strings, each of which has the longest
+    // length (255-byte string + 1-byte length field)
+    for (int i = 0; i < 255; ++i, ++ch) {
+        data.push_back(255);
+        data.insert(data.end(), 255, ch);
+    }
+    // the last character-string should be 255 bytes (including the one-byte
+    // length field) in length so that the total length should be in the range
+    // of 16-bit integers.
+    data.push_back(254);
+    data.insert(data.end(), 254, ch);
+
+    assert(data.size() == 65535);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, createFromWire) {
+    EXPECT_EQ(0, this->rdata_txt_like.compare(
+                  *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+                                        "rdata_txt_fromWire1")));
+
+    // Empty character string
+    EXPECT_EQ(0, this->rdata_txt_like_empty.compare(
+                  *this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+                                        "rdata_txt_fromWire2.wire")));
+
+    // Multiple character strings
+    this->obuffer.clear();
+    this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+                         "rdata_txt_fromWire3.wire")->toWire(this->obuffer);
+    // the result should be 'wiredata_txt' repeated twice
+    vector<uint8_t> expected_data(wiredata_txt_like, wiredata_txt_like +
+                                  sizeof(wiredata_txt_like));
+    expected_data.insert(expected_data.end(), wiredata_txt_like,
+                         wiredata_txt_like + sizeof(wiredata_txt_like));
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->obuffer.getData(),
+                        this->obuffer.getLength(),
+                        &expected_data[0], expected_data.size());
+
+    // Largest length of data.  There's nothing special, but should be
+    // constructed safely, and the content should be identical to the original
+    // data.
+    vector<uint8_t> largest_txt_like_data;
+    makeLargest(largest_txt_like_data);
+    InputBuffer ibuffer(&largest_txt_like_data[0],
+                        largest_txt_like_data.size());
+    TypeParam largest_txt_like(ibuffer, largest_txt_like_data.size());
+    this->obuffer.clear();
+    largest_txt_like.toWire(this->obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->obuffer.getData(),
+                        this->obuffer.getLength(),
+                        &largest_txt_like_data[0],
+                        largest_txt_like_data.size());
+
+    // rdlen parameter is out of range.  This is a rare event because we'd
+    // normally call the constructor via a polymorphic wrapper, where the
+    // length is validated.  But this should be checked explicitly.
+    InputBuffer ibuffer2(&largest_txt_like_data[0],
+                         largest_txt_like_data.size());
+    EXPECT_THROW(TypeParam(ibuffer2, 65536), InvalidRdataLength);
+
+    // RDATA is empty, which is invalid for TXT_LIKE.
+    EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+                                      "rdata_txt_fromWire4.wire"),
+                 DNSMessageFORMERR);
+
+    // character-string length is too large, which could cause overrun.
+    EXPECT_THROW(this->rdataFactoryFromFile(RRTYPE<TypeParam>(), RRClass("IN"),
+                                      "rdata_txt_fromWire5.wire"),
+                 DNSMessageFORMERR);
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireBuffer) {
+    this->rdata_txt_like.toWire(this->obuffer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->obuffer.getData(),
+                        this->obuffer.getLength(),
+                        wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toWireRenderer) {
+    this->rdata_txt_like.toWire(this->renderer);
+    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
+                        this->renderer.getData(),
+                        this->renderer.getLength(),
+                        wiredata_txt_like, sizeof(wiredata_txt_like));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, toText) {
+    EXPECT_EQ("\"Test String\"", this->rdata_txt_like.toText());
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, assignment) {
+    TypeParam rdata1("assignment1");
+    TypeParam rdata2("assignment2");
+    rdata1 = rdata2;
+    EXPECT_EQ(0, rdata2.compare(rdata1));
+
+    // Check if the copied data is valid even after the original is deleted
+    TypeParam* rdata3 = new TypeParam(rdata1);
+    TypeParam rdata4("assignment3");
+    rdata4 = *rdata3;
+    delete rdata3;
+    EXPECT_EQ(0, rdata4.compare(rdata1));
+
+    // Self assignment
+    rdata2 = rdata2;
+    EXPECT_EQ(0, rdata2.compare(rdata1));
+}
+
+TYPED_TEST(Rdata_TXT_LIKE_Test, compare) {
+    string const txt1("aaaaaaaa");
+    string const txt2("aaaaaaaaaa");
+    string const txt3("bbbbbbbb");
+    string const txt4(129, 'a');
+    string const txt5(128, 'b');
+
+    EXPECT_EQ(TypeParam(txt1).compare(TypeParam(txt1)), 0);
+
+    EXPECT_LT(TypeParam("").compare(TypeParam(txt1)), 0);
+    EXPECT_GT(TypeParam(txt1).compare(TypeParam("")), 0);
+
+    EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt2)), 0);
+    EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt1)), 0);
+
+    EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt3)), 0);
+    EXPECT_GT(TypeParam(txt3).compare(TypeParam(txt1)), 0);
+
+    // we're comparing the data raw, starting at the length octet, so a shorter
+    // string sorts before a longer one no matter the lexicographical order
+    EXPECT_LT(TypeParam(txt3).compare(TypeParam(txt2)), 0);
+    EXPECT_GT(TypeParam(txt2).compare(TypeParam(txt3)), 0);
+
+    // to make sure the length octet compares unsigned
+    EXPECT_LT(TypeParam(txt1).compare(TypeParam(txt4)), 0);
+    EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt1)), 0);
+
+    EXPECT_LT(TypeParam(txt5).compare(TypeParam(txt4)), 0);
+    EXPECT_GT(TypeParam(txt4).compare(TypeParam(txt5)), 0);
+
+    // comparison attempt between incompatible RR types should be rejected
+    EXPECT_THROW(TypeParam(txt1).compare(*this->rdata_nomatch),
+                 bad_cast);
+}
+
+}
diff --git a/src/lib/dns/tests/rdata_txt_unittest.cc b/src/lib/dns/tests/rdata_txt_unittest.cc
deleted file mode 100644
index e5f8ac9..0000000
--- a/src/lib/dns/tests/rdata_txt_unittest.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
-//
-// Permission to use, copy, modify, and/or distribute this software for any
-// purpose with or without fee is hereby granted, provided that the above
-// copyright notice and this permission notice appear in all copies.
-//
-// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-// PERFORMANCE OF THIS SOFTWARE.
-
-#include <util/buffer.h>
-#include <dns/exceptions.h>
-#include <dns/messagerenderer.h>
-#include <dns/rdata.h>
-#include <dns/rdataclass.h>
-#include <dns/rrclass.h>
-#include <dns/rrtype.h>
-
-#include <gtest/gtest.h>
-
-#include <dns/tests/unittest_util.h>
-#include <dns/tests/rdata_unittest.h>
-
-using isc::UnitTestUtil;
-using namespace std;
-using namespace isc::dns;
-using namespace isc::util;
-using namespace isc::dns::rdata;
-
-namespace {
-const generic::TXT rdata_txt("Test String");
-const generic::TXT rdata_txt_empty("");
-const generic::TXT rdata_txt_quoated("\"Test String\"");
-const uint8_t wiredata_txt[] = {
-    sizeof("Test String") - 1,
-    'T', 'e', 's', 't', ' ', 'S', 't', 'r', 'i', 'n', 'g'
-};
-const uint8_t wiredata_nulltxt[] = { 0 };
-vector<uint8_t> wiredata_longesttxt(256, 'a');
-
-class Rdata_TXT_Test : public RdataTest {
-protected:
-    Rdata_TXT_Test() {
-        wiredata_longesttxt[0] = 255; // adjust length
-    }
-};
-
-TEST_F(Rdata_TXT_Test, createFromText) {
-    // normal case is covered in toWireBuffer.
-
-    // surrounding double-quotes shouldn't change the result.
-    EXPECT_EQ(0, rdata_txt.compare(rdata_txt_quoated));
-
-    // Null character-string.
-    obuffer.clear();
-    generic::TXT(string("")).toWire(obuffer);
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        obuffer.getData(), obuffer.getLength(),
-                        wiredata_nulltxt, sizeof(wiredata_nulltxt));
-
-    // Longest possible character-string.
-    obuffer.clear();
-    generic::TXT(string(255, 'a')).toWire(obuffer);
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        obuffer.getData(), obuffer.getLength(),
-                        &wiredata_longesttxt[0], wiredata_longesttxt.size());
-
-    // Too long text for a valid character-string.
-    EXPECT_THROW(generic::TXT(string(256, 'a')), CharStringTooLong);
-
-    // The escape character makes the double quote a part of character-string,
-    // so this is invalid input and should be rejected.
-    EXPECT_THROW(generic::TXT("\"Test String\\\""), InvalidRdataText);
-
-    // Terminating double-quote is provided, so this is valid, but in this
-    // version of implementation we reject escaped characters.
-    EXPECT_THROW(generic::TXT("\"Test String\\\"\""), InvalidRdataText);
-}
-
-void
-makeLargest(vector<uint8_t>& data) {
-    uint8_t ch = 0;
-
-    // create 255 sets of character-strings, each of which has the longest
-    // length (255bytes string + 1-byte length field)
-    for (int i = 0; i < 255; ++i, ++ch) {
-        data.push_back(255);
-        data.insert(data.end(), 255, ch);
-    }
-    // the last character-string should be 255 bytes (including the one-byte
-    // length field) in length so that the total length should be in the range
-    // of 16-bit integers.
-    data.push_back(254);
-    data.insert(data.end(), 254, ch);
-
-    assert(data.size() == 65535);
-}
-
-TEST_F(Rdata_TXT_Test, createFromWire) {
-    EXPECT_EQ(0, rdata_txt.compare(
-                  *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
-                                        "rdata_txt_fromWire1")));
-
-    // Empty character string
-    EXPECT_EQ(0, rdata_txt_empty.compare(
-                  *rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
-                                        "rdata_txt_fromWire2.wire")));
-
-    // Multiple character strings
-    obuffer.clear();
-    rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
-                         "rdata_txt_fromWire3.wire")->toWire(obuffer);
-    // the result should be 'wiredata_txt' repeated twice
-    vector<uint8_t> expected_data(wiredata_txt, wiredata_txt +
-                                  sizeof(wiredata_txt));
-    expected_data.insert(expected_data.end(), wiredata_txt,
-                         wiredata_txt + sizeof(wiredata_txt));
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        obuffer.getData(), obuffer.getLength(),
-                        &expected_data[0], expected_data.size());
-
-    // Largest length of data.  There's nothing special, but should be
-    // constructed safely, and the content should be identical to the original
-    // data.
-    vector<uint8_t> largest_txt_data;
-    makeLargest(largest_txt_data);
-    InputBuffer ibuffer(&largest_txt_data[0], largest_txt_data.size());
-    generic::TXT largest_txt(ibuffer, largest_txt_data.size());
-    obuffer.clear();
-    largest_txt.toWire(obuffer);
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        obuffer.getData(), obuffer.getLength(),
-                        &largest_txt_data[0], largest_txt_data.size());
-
-    // rdlen parameter is out of range.  This is a rare event because we'd
-    // normally call the constructor via a polymorphic wrapper, where the
-    // length is validated.  But this should be checked explicitly.
-    InputBuffer ibuffer2(&largest_txt_data[0], largest_txt_data.size());
-    EXPECT_THROW(generic::TXT(ibuffer2, 65536), InvalidRdataLength);
-
-    // RDATA is empty, which is invalid for TXT.
-    EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
-                                      "rdata_txt_fromWire4.wire"),
-                 DNSMessageFORMERR);
-
-    // character-string length is too large, which could cause overrun.
-    EXPECT_THROW(rdataFactoryFromFile(RRType("TXT"), RRClass("IN"),
-                                      "rdata_txt_fromWire5.wire"),
-                 DNSMessageFORMERR);
-}
-
-TEST_F(Rdata_TXT_Test, toWireBuffer) {
-    rdata_txt.toWire(obuffer);
-    EXPECT_PRED_FORMAT4(UnitTestUtil::matchWireData,
-                        obuffer.getData(), obuffer.getLength(),
-                        wiredata_txt, sizeof(wiredata_txt));
-}
-
-TEST_F(Rdata_TXT_Test, toText) {
-    EXPECT_EQ("\"Test String\"", rdata_txt.toText());
-}
-}
diff --git a/src/lib/dns/tests/serial_unittest.cc b/src/lib/dns/tests/serial_unittest.cc
new file mode 100644
index 0000000..e27f628
--- /dev/null
+++ b/src/lib/dns/tests/serial_unittest.cc
@@ -0,0 +1,179 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/serial.h>
+
+using namespace isc::dns;
+
+class SerialTest : public ::testing::Test {
+public:
+    SerialTest() : one(1), one_2(1), two(2),
+                   date_zero(1980120100), date_one(1980120101),
+                   min(0), max(4294967295u),
+                   number_low(12345),
+                   number_medium(2000000000),
+                   number_high(4000000000u)
+    {}
+    Serial one, one_2, two, date_zero, date_one, min, max, number_low, number_medium, number_high;
+};
+
+//
+// Basic tests
+//
+
+TEST_F(SerialTest, get_value) {
+    EXPECT_EQ(1, one.getValue());
+    EXPECT_NE(2, one.getValue());
+    EXPECT_EQ(2, two.getValue());
+    EXPECT_EQ(1980120100, date_zero.getValue());
+    EXPECT_EQ(1980120101, date_one.getValue());
+    EXPECT_EQ(0, min.getValue());
+    EXPECT_EQ(4294967295u, max.getValue());
+    EXPECT_EQ(12345, number_low.getValue());
+    EXPECT_EQ(2000000000, number_medium.getValue());
+    EXPECT_EQ(4000000000u, number_high.getValue());
+}
+
+TEST_F(SerialTest, equals) {
+    EXPECT_EQ(one, one);
+    EXPECT_EQ(one, one_2);
+    EXPECT_NE(one, two);
+    EXPECT_NE(two, one);
+    EXPECT_EQ(Serial(12345), number_low);
+    EXPECT_NE(Serial(12346), number_low);
+}
+
+TEST_F(SerialTest, comparison) {
+    // These should be true/false even without serial arithmetic
+    EXPECT_LE(one, one);
+    EXPECT_LE(one, one_2);
+    EXPECT_LT(one, two);
+    EXPECT_LE(one, two);
+    EXPECT_GE(two, two);
+    EXPECT_GT(two, one);
+    EXPECT_GE(two, one);
+    EXPECT_LT(one, number_low);
+    EXPECT_LT(number_low, number_medium);
+    EXPECT_LT(number_medium, number_high);
+
+    // now let's try some that 'wrap', as it were
+    EXPECT_GT(min, max);
+    EXPECT_LT(max, min);
+    EXPECT_LT(number_high, number_low);
+}
+
+//
+// RFC 1982 Section 3.1
+//
+TEST_F(SerialTest, addition) {
+    EXPECT_EQ(two, one + one);
+    EXPECT_EQ(two, one + one_2);
+    EXPECT_EQ(max, max + min);
+    EXPECT_EQ(min, max + one);
+    EXPECT_EQ(one, max + two);
+    EXPECT_EQ(one, max + one + one);
+
+    EXPECT_EQ(one + 100, max + 102);
+    EXPECT_EQ(min + 2147483645, max + 2147483646);
+    EXPECT_EQ(min + 2147483646, max + MAX_SERIAL_INCREMENT);
+}
+
+//
+// RFC 1982 Section 3.2 has been checked by the basic tests above
+//
+
+//
+// RFC 1982 Section 4.1
+//
+
+// Helper function for addition_always_larger test, add some numbers
+// and check that the result is always larger than the original
+void do_addition_larger_test(const Serial& number) {
+    EXPECT_GE(number + 0, number);
+    EXPECT_EQ(number + 0, number);
+    EXPECT_GT(number + 1, number);
+    EXPECT_GT(number + 2, number);
+    EXPECT_GT(number + 100, number);
+    EXPECT_GT(number + 1111111, number);
+    EXPECT_GT(number + 2147483646, number);
+    EXPECT_GT(number + MAX_SERIAL_INCREMENT, number);
+    // Try MAX_SERIAL_INCREMENT as a hardcoded number as well
+    EXPECT_GT(number + 2147483647, number);
+}
+
+TEST_F(SerialTest, addition_always_larger) {
+    do_addition_larger_test(one);
+    do_addition_larger_test(two);
+    do_addition_larger_test(date_zero);
+    do_addition_larger_test(date_one);
+    do_addition_larger_test(min);
+    do_addition_larger_test(max);
+    do_addition_larger_test(number_low);
+    do_addition_larger_test(number_medium);
+    do_addition_larger_test(number_high);
+}
+
+//
+// RFC 1982 Section 4.2
+//
+
+// Helper function to do the second addition
+void
+do_two_additions_test_second(const Serial &original,
+                             const Serial &number)
+{
+    EXPECT_NE(original, number);
+    EXPECT_NE(original, number + 0);
+    EXPECT_NE(original, number + 1);
+    EXPECT_NE(original, number + 2);
+    EXPECT_NE(original, number + 100);
+    EXPECT_NE(original, number + 1111111);
+    EXPECT_NE(original, number + 2147483646);
+    EXPECT_NE(original, number + MAX_SERIAL_INCREMENT);
+    EXPECT_NE(original, number + 2147483647);
+}
+
+void do_two_additions_test_first(const Serial &number) {
+    do_two_additions_test_second(number, number + 1);
+    do_two_additions_test_second(number, number + 2);
+    do_two_additions_test_second(number, number + 100);
+    do_two_additions_test_second(number, number + 1111111);
+    do_two_additions_test_second(number, number + 2147483646);
+    do_two_additions_test_second(number, number + MAX_SERIAL_INCREMENT);
+    do_two_additions_test_second(number, number + 2147483647);
+}
+
+TEST_F(SerialTest, two_additions_never_equal) {
+    do_two_additions_test_first(one);
+    do_two_additions_test_first(two);
+    do_two_additions_test_first(date_zero);
+    do_two_additions_test_first(date_one);
+    do_two_additions_test_first(min);
+    do_two_additions_test_first(max);
+    do_two_additions_test_first(number_low);
+    do_two_additions_test_first(number_medium);
+    do_two_additions_test_first(number_high);
+}
+
+//
+// RFC 1982 Section 4.3 and 4.4 have nothing to test
+//
+
+//
+// Tests from RFC 1982 examples
+//
+TEST(SerialTextRFCExamples, rfc_example_tests) {
+}
diff --git a/src/lib/dns/tests/testdata/Makefile.am b/src/lib/dns/tests/testdata/Makefile.am
index 257f2f3..27edf5f 100644
--- a/src/lib/dns/tests/testdata/Makefile.am
+++ b/src/lib/dns/tests/testdata/Makefile.am
@@ -6,7 +6,9 @@ BUILT_SOURCES += message_fromWire10.wire message_fromWire11.wire
 BUILT_SOURCES += message_fromWire12.wire message_fromWire13.wire
 BUILT_SOURCES += message_fromWire14.wire message_fromWire15.wire
 BUILT_SOURCES += message_fromWire16.wire message_fromWire17.wire
-BUILT_SOURCES += message_fromWire18.wire
+BUILT_SOURCES += message_fromWire18.wire message_fromWire19.wire
+BUILT_SOURCES += message_fromWire20.wire message_fromWire21.wire
+BUILT_SOURCES += message_fromWire22.wire
 BUILT_SOURCES += message_toWire2.wire message_toWire3.wire
 BUILT_SOURCES += message_toWire4.wire message_toWire5.wire
 BUILT_SOURCES += message_toText1.wire message_toText2.wire
@@ -26,10 +28,20 @@ BUILT_SOURCES += rdata_nsec3_fromWire10.wire rdata_nsec3_fromWire11.wire
 BUILT_SOURCES += rdata_nsec3_fromWire12.wire rdata_nsec3_fromWire13.wire
 BUILT_SOURCES += rdata_nsec3_fromWire14.wire rdata_nsec3_fromWire15.wire
 BUILT_SOURCES += rdata_rrsig_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire1.wire rdata_minfo_fromWire2.wire
+BUILT_SOURCES += rdata_minfo_fromWire3.wire rdata_minfo_fromWire4.wire
+BUILT_SOURCES += rdata_minfo_fromWire5.wire rdata_minfo_fromWire6.wire
+BUILT_SOURCES += rdata_minfo_toWire1.wire rdata_minfo_toWire2.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed1.wire
+BUILT_SOURCES += rdata_minfo_toWireUncompressed2.wire
 BUILT_SOURCES += rdata_rp_fromWire1.wire rdata_rp_fromWire2.wire
 BUILT_SOURCES += rdata_rp_fromWire3.wire rdata_rp_fromWire4.wire
 BUILT_SOURCES += rdata_rp_fromWire5.wire rdata_rp_fromWire6.wire
 BUILT_SOURCES += rdata_rp_toWire1.wire rdata_rp_toWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire1.wire rdata_afsdb_fromWire2.wire
+BUILT_SOURCES += rdata_afsdb_fromWire3.wire rdata_afsdb_fromWire4.wire
+BUILT_SOURCES += rdata_afsdb_fromWire5.wire
+BUILT_SOURCES += rdata_afsdb_toWire1.wire rdata_afsdb_toWire2.wire
 BUILT_SOURCES += rdata_soa_toWireUncompressed.wire
 BUILT_SOURCES += rdata_txt_fromWire2.wire rdata_txt_fromWire3.wire
 BUILT_SOURCES += rdata_txt_fromWire4.wire rdata_txt_fromWire5.wire
@@ -49,8 +61,7 @@ BUILT_SOURCES += tsig_verify10.wire
 
 # NOTE: keep this in sync with real file listing
 # so is included in tarball
-EXTRA_DIST = gen-wiredata.py.in
-EXTRA_DIST += edns_toWire1.spec edns_toWire2.spec
+EXTRA_DIST = edns_toWire1.spec edns_toWire2.spec
 EXTRA_DIST += edns_toWire3.spec edns_toWire4.spec
 EXTRA_DIST += masterload.txt
 EXTRA_DIST += message_fromWire1 message_fromWire2
@@ -62,6 +73,8 @@ EXTRA_DIST += message_fromWire11.spec message_fromWire12.spec
 EXTRA_DIST += message_fromWire13.spec message_fromWire14.spec
 EXTRA_DIST += message_fromWire15.spec message_fromWire16.spec
 EXTRA_DIST += message_fromWire17.spec message_fromWire18.spec
+EXTRA_DIST += message_fromWire19.spec message_fromWire20.spec
+EXTRA_DIST += message_fromWire21.spec message_fromWire22.spec
 EXTRA_DIST += message_toWire1 message_toWire2.spec message_toWire3.spec
 EXTRA_DIST += message_toWire4.spec message_toWire5.spec
 EXTRA_DIST += message_toText1.txt message_toText1.spec
@@ -77,6 +90,7 @@ EXTRA_DIST += question_fromWire question_toWire1 question_toWire2
 EXTRA_DIST += rdatafields1.spec rdatafields2.spec rdatafields3.spec
 EXTRA_DIST += rdatafields4.spec rdatafields5.spec rdatafields6.spec
 EXTRA_DIST += rdata_cname_fromWire rdata_dname_fromWire rdata_dnskey_fromWire
+EXTRA_DIST += rdata_dhcid_fromWire rdata_dhcid_toWire
 EXTRA_DIST += rdata_ds_fromWire rdata_in_a_fromWire rdata_in_aaaa_fromWire
 EXTRA_DIST += rdata_mx_fromWire rdata_mx_toWire1 rdata_mx_toWire2
 EXTRA_DIST += rdata_ns_fromWire
@@ -100,7 +114,18 @@ EXTRA_DIST += rdata_rp_fromWire1.spec rdata_rp_fromWire2.spec
 EXTRA_DIST += rdata_rp_fromWire3.spec rdata_rp_fromWire4.spec
 EXTRA_DIST += rdata_rp_fromWire5.spec rdata_rp_fromWire6.spec
 EXTRA_DIST += rdata_rp_toWire1.spec rdata_rp_toWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire1.spec rdata_afsdb_fromWire2.spec
+EXTRA_DIST += rdata_afsdb_fromWire3.spec rdata_afsdb_fromWire4.spec
+EXTRA_DIST += rdata_afsdb_fromWire5.spec
+EXTRA_DIST += rdata_afsdb_toWire1.spec rdata_afsdb_toWire2.spec
 EXTRA_DIST += rdata_soa_fromWire rdata_soa_toWireUncompressed.spec
+EXTRA_DIST += rdata_srv_fromWire
+EXTRA_DIST += rdata_minfo_fromWire1.spec rdata_minfo_fromWire2.spec
+EXTRA_DIST += rdata_minfo_fromWire3.spec rdata_minfo_fromWire4.spec
+EXTRA_DIST += rdata_minfo_fromWire5.spec rdata_minfo_fromWire6.spec
+EXTRA_DIST += rdata_minfo_toWire1.spec rdata_minfo_toWire2.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed1.spec
+EXTRA_DIST += rdata_minfo_toWireUncompressed2.spec
 EXTRA_DIST += rdata_txt_fromWire1 rdata_txt_fromWire2.spec
 EXTRA_DIST += rdata_txt_fromWire3.spec rdata_txt_fromWire4.spec
 EXTRA_DIST += rdata_txt_fromWire5.spec rdata_unknown_fromWire
@@ -122,4 +147,4 @@ EXTRA_DIST += tsig_verify7.spec tsig_verify8.spec tsig_verify9.spec
 EXTRA_DIST += tsig_verify10.spec
 
 .spec.wire:
-	./gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/dns/tests/testdata/gen-wiredata.py.in b/src/lib/dns/tests/testdata/gen-wiredata.py.in
deleted file mode 100755
index 818c6e9..0000000
--- a/src/lib/dns/tests/testdata/gen-wiredata.py.in
+++ /dev/null
@@ -1,610 +0,0 @@
-#!@PYTHON@
-
-# Copyright (C) 2010  Internet Systems Consortium.
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
-# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
-# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
-# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
-# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-import configparser, re, time, socket, sys
-from datetime import datetime
-from optparse import OptionParser
-
-re_hex = re.compile(r'^0x[0-9a-fA-F]+')
-re_decimal = re.compile(r'^\d+$')
-re_string = re.compile(r"\'(.*)\'$")
-
-dnssec_timefmt = '%Y%m%d%H%M%S'
-
-dict_qr = { 'query' : 0, 'response' : 1 }
-dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
-                'update' : 5 }
-rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
-dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
-               'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
-               'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
-rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
-dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
-                'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
-                'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
-                'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
-                'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
-                'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
-                'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
-                'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
-                'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
-                'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
-                'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
-                'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
-                'maila' : 254, 'any' : 255 }
-rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
-dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
-rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
-                          dict_rrclass.keys()])
-dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
-                   'rsasha1' : 5 }
-dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
-rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
-                            dict_algorithm.keys()])
-rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
-                                  dict_nsec3_algorithm.keys()])
-
-header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
-                   'rcode' : dict_rcode }
-question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
-rrsig_xtables = { 'algorithm' : dict_algorithm }
-
-def parse_value(value, xtable = {}):
-    if re.search(re_hex, value):
-        return int(value, 16)
-    if re.search(re_decimal, value):
-        return int(value)
-    m = re.match(re_string, value)
-    if m:
-        return m.group(1)
-    lovalue = value.lower()
-    if lovalue in xtable:
-        return xtable[lovalue]
-    return value
-
-def code_totext(code, dict):
-    if code in dict.keys():
-        return dict[code] + '(' + str(code) + ')'
-    return str(code)
-
-def encode_name(name, absolute=True):
-    # make sure the name is dot-terminated.  duplicate dots will be ignored
-    # below.
-    name += '.'
-    labels = name.split('.')
-    wire = ''
-    for l in labels:
-        if len(l) > 4 and l[0:4] == 'ptr=':
-            # special meta-syntax for compression pointer
-            wire += '%04x' % (0xc000 | int(l[4:]))
-            break
-        if absolute or len(l) > 0:
-            wire += '%02x' % len(l)
-            wire += ''.join(['%02x' % ord(ch) for ch in l])
-        if len(l) == 0:
-            break
-    return wire
-
-def encode_string(name, len=None):
-    if type(name) is int and len is not None:
-        return '%0.*x' % (len * 2, name)
-    return ''.join(['%02x' % ord(ch) for ch in name])
-
-def count_namelabels(name):
-    if name == '.':             # special case
-        return 0
-    m = re.match('^(.*)\.$', name)
-    if m:
-        name = m.group(1)
-    return len(name.split('.'))
-
-def get_config(config, section, configobj, xtables = {}):
-    try:
-        for field in config.options(section):
-            value = config.get(section, field)
-            if field in xtables.keys():
-                xtable = xtables[field]
-            else:
-                xtable = {}
-            configobj.__dict__[field] = parse_value(value, xtable)
-    except configparser.NoSectionError:
-        return False
-    return True
-
-def print_header(f, input_file):
-    f.write('''###
-### This data file was auto-generated from ''' + input_file + '''
-###
-''')
-
-class Name:
-    name = 'example.com'
-    pointer = None                # no compression by default
-    def dump(self, f):
-        name = self.name
-        if self.pointer is not None:
-            if len(name) > 0 and name[-1] != '.':
-                name += '.'
-            name += 'ptr=%d' % self.pointer
-        name_wire = encode_name(name)
-        f.write('\n# DNS Name: %s' % self.name)
-        if self.pointer is not None:
-            f.write(' + compression pointer: %d' % self.pointer)
-        f.write('\n')
-        f.write('%s' % name_wire)
-        f.write('\n')
-
-class DNSHeader:
-    id = 0x1035
-    (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
-    mbz = 0
-    rcode = 0                   # noerror
-    opcode = 0                  # query
-    (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
-    def dump(self, f):
-        f.write('\n# Header Section\n')
-        f.write('# ID=' + str(self.id))
-        f.write(' QR=' + ('Response' if self.qr else 'Query'))
-        f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
-        f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
-        f.write('%s' % (' AA' if self.aa else ''))
-        f.write('%s' % (' TC' if self.tc else ''))
-        f.write('%s' % (' RD' if self.rd else ''))
-        f.write('%s' % (' AD' if self.ad else ''))
-        f.write('%s' % (' CD' if self.cd else ''))
-        f.write('\n')
-        f.write('%04x ' % self.id)
-        flag_and_code = 0
-        flag_and_code |= (self.qr << 15 | self.opcode << 14 | self.aa << 10 |
-                          self.tc << 9 | self.rd << 8 | self.ra << 7 |
-                          self.mbz << 6 | self.ad << 5 | self.cd << 4 |
-                          self.rcode)
-        f.write('%04x\n' % flag_and_code)
-        f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
-                (self.qdcount, self.ancount, self.nscount, self.arcount))
-        f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
-                                           self.nscount, self.arcount))
-
-class DNSQuestion:
-    name = 'example.com.'
-    rrtype = parse_value('A', dict_rrtype)
-    rrclass = parse_value('IN', dict_rrclass)
-    def dump(self, f):
-        f.write('\n# Question Section\n')
-        f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
-                (self.name,
-                 code_totext(self.rrtype, rdict_rrtype),
-                 code_totext(self.rrclass, rdict_rrclass)))
-        f.write(encode_name(self.name))
-        f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
-
-class EDNS:
-    name = '.'
-    udpsize = 4096
-    extrcode = 0
-    version = 0
-    do = 0
-    mbz = 0
-    rdlen = 0
-    def dump(self, f):
-        f.write('\n# EDNS OPT RR\n')
-        f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
-                (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
-                 self.udpsize, self.extrcode, self.version,
-                 1 if self.do else 0))
-        
-        code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
-        extflags = (self.do << 15) | (self.mbz & 0x8000)
-        f.write('%s %04x %04x %04x %04x\n' %
-                (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
-                 code_vers, extflags))
-        f.write('# RDLEN=%d\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen)
-
-class RR:
-    '''This is a base class for various types of RR test data.
-    For each RR type (A, AAAA, NS, etc), we define a derived class of RR
-    to dump type specific RDATA parameters.  This class defines parameters
-    common to all types of RDATA, namely the owner name, RR class and TTL.
-    The dump() method of derived classes are expected to call dump_header(),
-    whose default implementation is provided in this class.  This method
-    decides whether to dump the test data as an RR (with name, type, class)
-    or only as RDATA (with its length), and dumps the corresponding data
-    via the specified file object.
-
-    By convention we assume derived classes are named after the common
-    standard mnemonic of the corresponding RR types.  For example, the
-    derived class for the RR type SOA should be named "SOA".
-
-    Configurable parameters are as follows:
-    - as_rr (bool): Whether or not the data is to be dumped as an RR.  False
-      by default.
-    - rr_class (string): The RR class of the data.  Only meaningful when the
-      data is dumped as an RR.  Default is 'IN'.
-    - rr_ttl (integer): The TTL value of the RR.  Only meaningful when the
-      data is dumped as an RR.  Default is 86400 (1 day).
-    '''
-
-    def __init__(self):
-        self.as_rr = False
-        # only when as_rr is True, same for class/TTL:
-        self.rr_name = 'example.com'
-        self.rr_class = 'IN'
-        self.rr_ttl = 86400
-    def dump_header(self, f, rdlen):
-        type_txt = self.__class__.__name__
-        type_code = parse_value(type_txt, dict_rrtype)
-        if self.as_rr:
-            rrclass = parse_value(self.rr_class, dict_rrclass)
-            f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d RDLEN=%d)\n' %
-                    (type_txt, self.rr_name,
-                     code_totext(rrclass, rdict_rrclass), self.rr_ttl, rdlen))
-            f.write('%s %04x %04x %08x %04x\n' %
-                    (encode_name(self.rr_name), type_code, rrclass,
-                     self.rr_ttl, rdlen))
-        else:
-            f.write('\n# %s RDATA (RDLEN=%d)\n' % (type_txt, rdlen))
-            f.write('%04x\n' % rdlen)
-
-class A(RR):
-    rdlen = 4                   # fixed by default
-    address = '192.0.2.1'
-
-    def dump(self, f):
-        self.dump_header(f, self.rdlen)
-        f.write('# Address=%s\n' % (self.address))
-        bin_address = socket.inet_aton(self.address)
-        f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
-                                        bin_address[2], bin_address[3]))
-
-class NS(RR):
-    rdlen = None                   # auto calculate
-    nsname = 'ns.example.com'
-
-    def dump(self, f):
-        nsname_wire = encode_name(self.nsname)
-        if self.rdlen is None:
-            self.rdlen = len(nsname_wire) / 2
-        self.dump_header(f, self.rdlen)
-        f.write('# NS name=%s\n' % (self.nsname))
-        f.write('%s\n' % nsname_wire)
-
-class SOA(RR):
-    rdlen = None                  # auto-calculate
-    mname = 'ns.example.com'
-    rname = 'root.example.com'
-    serial = 2010012601
-    refresh = 3600
-    retry = 300
-    expire = 3600000
-    minimum = 1200
-    def dump(self, f):
-        mname_wire = encode_name(self.mname)
-        rname_wire = encode_name(self.rname)
-        if self.rdlen is None:
-            self.rdlen = int(20 + len(mname_wire) / 2 + len(str(rname_wire)) / 2)
-        self.dump_header(f, self.rdlen)
-        f.write('# NNAME=%s RNAME=%s\n' % (self.mname, self.rname))
-        f.write('%s %s\n' % (mname_wire, rname_wire))
-        f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
-                (self.serial, self.refresh, self.retry, self.expire,
-                 self.minimum))
-        f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
-                                                self.retry, self.expire,
-                                                self.minimum))
-
-class TXT(RR):
-    rdlen = None                # auto-calculate
-    nstring = 1                 # number of character-strings
-    stringlen = -1              # default string length, auto-calculate
-    string = 'Test String'      # default string
-    def dump(self, f):
-        stringlen_list = []
-        string_list = []
-        wirestring_list = []
-        for i in range(0, self.nstring):
-            key_string = 'string' + str(i)
-            if key_string in self.__dict__:
-                string_list.append(self.__dict__[key_string])
-            else:
-                string_list.append(self.string)
-            wirestring_list.append(encode_string(string_list[-1]))
-            key_stringlen = 'stringlen' + str(i)
-            if key_stringlen in self.__dict__:
-                stringlen_list.append(self.__dict__[key_stringlen])
-            else:
-                stringlen_list.append(self.stringlen)
-            if stringlen_list[-1] < 0:
-                stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
-        if self.rdlen is None:
-            self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
-        self.dump_header(f, self.rdlen)
-        for i in range(0, self.nstring):
-            f.write('# String Len=%d, String=\"%s\"\n' %
-                    (stringlen_list[i], string_list[i]))
-            f.write('%02x%s%s\n' % (stringlen_list[i],
-                                    ' ' if len(wirestring_list[i]) > 0 else '',
-                                    wirestring_list[i]))
-
-class RP:
-    '''Implements rendering RP RDATA in the wire format.
-    Configurable parameters are as follows:
-    - rdlen: 16-bit RDATA length.  If omitted, the accurate value is auto
-      calculated and used; if negative, the RDLEN field will be omitted from
-      the output data.
-    - mailbox: The mailbox field.
-    - text: The text field.
-    All of these parameters have the default values and can be omitted.
-    '''
-    rdlen = None                # auto-calculate
-    mailbox = 'root.example.com'
-    text = 'rp-text.example.com'
-    def dump(self, f):
-        mailbox_wire = encode_name(self.mailbox)
-        text_wire = encode_name(self.text)
-        if self.rdlen is None:
-            self.rdlen = (len(mailbox_wire) + len(text_wire)) / 2
-        else:
-            self.rdlen = int(self.rdlen)
-        if self.rdlen >= 0:
-            f.write('\n# RP RDATA (RDLEN=%d)\n' % self.rdlen)
-            f.write('%04x\n' % self.rdlen)
-        else:
-            f.write('\n# RP RDATA (RDLEN omitted)\n')
-        f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
-        f.write('%s %s\n' % (mailbox_wire, text_wire))
-
-class NSECBASE:
-    '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
-    these RRs.  The NSEC and NSEC3 classes will be inherited from this
-    class.'''
-    nbitmap = 1                 # number of bitmaps
-    block = 0
-    maplen = None              # default bitmap length, auto-calculate
-    bitmap = '040000000003'     # an arbtrarily chosen bitmap sample
-    def dump(self, f):
-        # first, construct the bitmpa data
-        block_list = []
-        maplen_list = []
-        bitmap_list = []
-        for i in range(0, self.nbitmap):
-            key_bitmap = 'bitmap' + str(i)
-            if key_bitmap in self.__dict__:
-                bitmap_list.append(self.__dict__[key_bitmap])
-            else:
-                bitmap_list.append(self.bitmap)
-            key_maplen = 'maplen' + str(i)
-            if key_maplen in self.__dict__:
-                maplen_list.append(self.__dict__[key_maplen])
-            else:
-                maplen_list.append(self.maplen)
-            if maplen_list[-1] is None: # calculate it if not specified
-                maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
-            key_block = 'block' + str(i)
-            if key_block in self.__dict__:
-               block_list.append(self.__dict__[key_block])
-            else:
-                block_list.append(self.block)
-
-        # dump RR-type specific part (NSEC or NSEC3)
-        self.dump_fixedpart(f, 2 * self.nbitmap + \
-                                int(len(''.join(bitmap_list)) / 2))
-
-        # dump the bitmap
-        for i in range(0, self.nbitmap):
-            f.write('# Bitmap: Block=%d, Length=%d\n' %
-                    (block_list[i], maplen_list[i]))
-            f.write('%02x %02x %s\n' %
-                    (block_list[i], maplen_list[i], bitmap_list[i]))
-
-class NSEC(NSECBASE):
-    rdlen = None                # auto-calculate
-    nextname = 'next.example.com'
-    def dump_fixedpart(self, f, bitmap_totallen):
-        name_wire = encode_name(self.nextname)
-        if self.rdlen is None:
-            # if rdlen needs to be calculated, it must be based on the bitmap
-            # length, because the configured maplen can be fake.
-            self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
-        f.write('\n# NSEC RDATA (RDLEN=%d)\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen);
-        f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
-                                                 int(len(name_wire) / 2)))
-        f.write('%s\n' % name_wire)
-
-class NSEC3(NSECBASE):
-    rdlen = None                # auto-calculate
-    hashalg = 1                 # SHA-1
-    optout = False              # opt-out flag
-    mbz = 0                     # other flag fields (none defined yet)
-    iterations = 1
-    saltlen = 5
-    salt = 's' * saltlen
-    hashlen = 20
-    hash = 'h' * hashlen
-    def dump_fixedpart(self, f, bitmap_totallen):
-        if self.rdlen is None:
-            # if rdlen needs to be calculated, it must be based on the bitmap
-            # length, because the configured maplen can be fake.
-            self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
-                + bitmap_totallen
-        f.write('\n# NSEC3 RDATA (RDLEN=%d)\n' % self.rdlen)
-        f.write('%04x\n' % self.rdlen)
-        optout_val = 1 if self.optout else 0
-        f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
-                (code_totext(self.hashalg, rdict_nsec3_algorithm),
-                 optout_val, self.mbz, self.iterations))
-        f.write('%02x %02x %04x\n' %
-                (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
-        f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
-        f.write('%02x%s%s\n' % (self.saltlen,
-                                ' ' if len(self.salt) > 0 else '',
-                                encode_string(self.salt)))
-        f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
-        f.write('%02x%s%s\n' % (self.hashlen,
-                                ' ' if len(self.hash) > 0 else '',
-                                encode_string(self.hash)))
-
-class RRSIG:
-    rdlen = -1                  # auto-calculate
-    covered = 1                 # A
-    algorithm = 5               # RSA-SHA1
-    labels = -1                 # auto-calculate (#labels of signer)
-    originalttl = 3600
-    expiration = int(time.mktime(datetime.strptime('20100131120000',
-                                                   dnssec_timefmt).timetuple()))
-    inception = int(time.mktime(datetime.strptime('20100101120000',
-                                                  dnssec_timefmt).timetuple()))
-    tag = 0x1035
-    signer = 'example.com'
-    signature = 0x123456789abcdef123456789abcdef
-    def dump(self, f):
-        name_wire = encode_name(self.signer)
-        sig_wire = '%x' % self.signature 
-        rdlen = self.rdlen
-        if rdlen < 0:
-            rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
-        labels = self.labels
-        if labels < 0:
-            labels = count_namelabels(self.signer)
-        f.write('\n# RRSIG RDATA (RDLEN=%d)\n' % rdlen)
-        f.write('%04x\n' % rdlen);
-        f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
-                (code_totext(self.covered, rdict_rrtype),
-                 code_totext(self.algorithm, rdict_algorithm), labels,
-                 self.originalttl))
-        f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
-                                           labels, self.originalttl))
-        f.write('# Expiration=%s, Inception=%s\n' %
-                (str(self.expiration), str(self.inception)))
-        f.write('%08x %08x\n' % (self.expiration, self.inception))
-        f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
-        f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
-
-class TSIG(RR):
-    rdlen = None                # auto-calculate
-    algorithm = 'hmac-sha256'
-    time_signed = 1286978795    # arbitrarily chosen default
-    fudge = 300
-    mac_size = None             # use a common value for the algorithm
-    mac = None                  # use 'x' * mac_size
-    original_id = 2845          # arbitrarily chosen default
-    error = 0
-    other_len = None         # 6 if error is BADTIME; otherwise 0
-    other_data = None        # use time_signed + fudge + 1 for BADTIME
-    dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
-
-    # TSIG has some special defaults
-    def __init__(self):
-        super().__init__()
-        self.rr_class = 'ANY'
-        self.rr_ttl = 0
-
-    def dump(self, f):
-        if str(self.algorithm) == 'hmac-md5':
-            name_wire = encode_name('hmac-md5.sig-alg.reg.int')
-        else:
-            name_wire = encode_name(self.algorithm)
-        mac_size = self.mac_size
-        if mac_size is None:
-            if self.algorithm in self.dict_macsize.keys():
-                mac_size = self.dict_macsize[self.algorithm]
-            else:
-                raise RuntimeError('TSIG Mac Size cannot be determined')
-        mac = encode_string('x' * mac_size) if self.mac is None else \
-            encode_string(self.mac, mac_size)
-        other_len = self.other_len
-        if other_len is None:
-            # 18 = BADTIME
-            other_len = 6 if self.error == 18 else 0
-        other_data = self.other_data
-        if other_data is None:
-            other_data = '%012x' % (self.time_signed + self.fudge + 1) \
-                if self.error == 18 else ''
-        else:
-            other_data = encode_string(self.other_data, other_len)
-        if self.rdlen is None:
-            self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
-                                 len(other_data) / 2)
-        self.dump_header(f, self.rdlen)
-        f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
-                (self.algorithm, self.time_signed, self.fudge))
-        f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
-        f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
-        f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
-        f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
-        f.write('%04x %04x\n' %  (self.original_id, self.error))
-        f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
-        f.write('%04x%s\n' % (other_len,
-                              ' ' + other_data if len(other_data) > 0 else ''))
-
-def get_config_param(section):
-    config_param = {'name' : (Name, {}),
-                    'header' : (DNSHeader, header_xtables),
-                    'question' : (DNSQuestion, question_xtables),
-                    'edns' : (EDNS, {}), 'a' : (A, {}), 'ns' : (NS, {}),
-                    'soa' : (SOA, {}), 'txt' : (TXT, {}),
-                    'rp' : (RP, {}), 'rrsig' : (RRSIG, {}),
-                    'nsec' : (NSEC, {}), 'nsec3' : (NSEC3, {}),
-                    'tsig' : (TSIG, {}) }
-    s = section
-    m = re.match('^([^:]+)/\d+$', section)
-    if m:
-        s = m.group(1)
-    return config_param[s]
-
-usage = '''usage: %prog [options] input_file'''
-
-if __name__ == "__main__":
-    parser = OptionParser(usage=usage)
-    parser.add_option('-o', '--output', action='store', dest='output',
-                      default=None, metavar='FILE',
-                      help='output file name [default: prefix of input_file]')
-    (options, args) = parser.parse_args()
-
-    if len(args) == 0:
-        parser.error('input file is missing')
-    configfile = args[0]
-
-    outputfile = options.output
-    if not outputfile:
-        m = re.match('(.*)\.[^.]+$', configfile)
-        if m:
-            outputfile = m.group(1)
-        else:
-            raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
-
-    config = configparser.SafeConfigParser()
-    config.read(configfile)
-
-    output = open(outputfile, 'w')
-
-    print_header(output, configfile)
-
-    # First try the 'custom' mode; if it fails assume the standard mode.
-    try:
-        sections = config.get('custom', 'sections').split(':')
-    except configparser.NoSectionError:
-        sections = ['header', 'question', 'edns']
-
-    for s in sections:
-        section_param = get_config_param(s)
-        (obj, xtables) = (section_param[0](), section_param[1])
-        if get_config(config, s, obj, xtables):
-            obj.dump(output)
-
-    output.close()
diff --git a/src/lib/dns/tests/testdata/message_fromWire19.spec b/src/lib/dns/tests/testdata/message_fromWire19.spec
new file mode 100644
index 0000000..8212dbf
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire19.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# answer section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+ancount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire20.spec b/src/lib/dns/tests/testdata/message_fromWire20.spec
new file mode 100644
index 0000000..91986e4
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire20.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# authority section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+nscount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire21.spec b/src/lib/dns/tests/testdata/message_fromWire21.spec
new file mode 100644
index 0000000..cd6aac9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire21.spec
@@ -0,0 +1,20 @@
+#
+# A non-realistic DNS response message containing mixed types of RRs in the
+# additional section in a mixed order.
+#
+
+[custom]
+sections: header:question:a/1:aaaa:a/2
+[header]
+qr: 1
+arcount: 3
+[question]
+name: www.example.com
+rrtype: A
+[a/1]
+as_rr: True
+[aaaa]
+as_rr: True
+[a/2]
+as_rr: True
+address: 192.0.2.2
diff --git a/src/lib/dns/tests/testdata/message_fromWire22.spec b/src/lib/dns/tests/testdata/message_fromWire22.spec
new file mode 100644
index 0000000..a52523b
--- /dev/null
+++ b/src/lib/dns/tests/testdata/message_fromWire22.spec
@@ -0,0 +1,14 @@
+#
+# A simple DNS message containing one SOA RR in the answer section.  This is
+# intended to be trimmed to emulate a bogus message.
+#
+
+[custom]
+sections: header:question:soa
+[header]
+qr: 1
+ancount: 1
+[question]
+rrtype: SOA
+[soa]
+as_rr: True
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
new file mode 100644
index 0000000..f831313
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: afsdb
+[afsdb]
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
new file mode 100644
index 0000000..f33e768
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com
+[afsdb]
+server: afsdb.ptr=0
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
new file mode 100644
index 0000000..993032f
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire3.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
new file mode 100644
index 0000000..37abf13
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire4.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
new file mode 100644
index 0000000..0ea79dd
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_fromWire5.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+server: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
new file mode 100644
index 0000000..1946458
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire1.spec
@@ -0,0 +1,4 @@
+[custom]
+sections: afsdb
+[afsdb]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
new file mode 100644
index 0000000..c80011a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_afsdb_toWire2.spec
@@ -0,0 +1,8 @@
+[custom]
+sections: name:afsdb
+[name]
+name: example.com.
+[afsdb]
+subtype: 0
+server: root.example.com
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_fromWire b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
new file mode 100644
index 0000000..0c8d56a
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_fromWire
@@ -0,0 +1,12 @@
+#
+# DHCID RDATA stored in an input buffer
+#
+# Valid RDATA for 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+#
+# RDLENGTH=41 bytes
+# 0  1
+ 00 29
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
diff --git a/src/lib/dns/tests/testdata/rdata_dhcid_toWire b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
new file mode 100644
index 0000000..99ec229
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_dhcid_toWire
@@ -0,0 +1,7 @@
+#
+# DHCID RDATA stored in an output buffer
+#
+# 0LIg0LvQtdGB0YMg0YDQvtC00LjQu9Cw0YHRjCDRkdC70L7Rh9C60LA=
+d0 b2 20 d0 bb d0 b5 d1 81 d1 83 20 d1 80 d0 be
+d0 b4 d0 b8 d0 bb d0 b0 d1 81 d1 8c 20 d1 91 d0
+bb d0 be d1 87 d0 ba d0 b0
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
new file mode 100644
index 0000000..2c43db0
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire1.spec
@@ -0,0 +1,3 @@
+[custom]
+sections: minfo
+[minfo]
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
new file mode 100644
index 0000000..d781cac
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire2.spec
@@ -0,0 +1,7 @@
+[custom]
+sections: name:minfo
+[name]
+name: a.example.com.
+[minfo]
+rmailbox: rmailbox.ptr=02
+emailbox: emailbox.ptr=02
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
new file mode 100644
index 0000000..a1d4b76
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire3.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too short
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 3
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
new file mode 100644
index 0000000..269a6ce
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire4.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+# rdlength too long
+[minfo]
+emailbox: emailbox.ptr=11
+rdlen: 80
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
new file mode 100644
index 0000000..3a888e3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire5.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus rmailbox name
+[minfo]
+rmailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
new file mode 100644
index 0000000..c75ed8e
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_fromWire6.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+# bogus emailbox name
+[minfo]
+emailbox: "01234567890123456789012345678901234567890123456789012345678901234"
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
new file mode 100644
index 0000000..7b340a3
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire1.spec
@@ -0,0 +1,5 @@
+[custom]
+sections: minfo
+[minfo]
+emailbox: emailbox.ptr=09
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
new file mode 100644
index 0000000..132f118
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWire2.spec
@@ -0,0 +1,6 @@
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+emailbox: emailbox.ptr=05
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
new file mode 100644
index 0000000..d99a381
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed1.spec
@@ -0,0 +1,7 @@
+#
+# The simplest form of MINFO: all default parameters
+#
+[custom]
+sections: minfo
+[minfo]
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
new file mode 100644
index 0000000..0f78fcc
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_minfo_toWireUncompressed2.spec
@@ -0,0 +1,8 @@
+#
+# A simple form of MINFO: custom rmailbox and default emailbox
+#
+[custom]
+sections: minfo
+[minfo]
+rmailbox: root.example.com.
+rdlen: -1
diff --git a/src/lib/dns/tests/testdata/rdata_srv_fromWire b/src/lib/dns/tests/testdata/rdata_srv_fromWire
new file mode 100644
index 0000000..dac87e9
--- /dev/null
+++ b/src/lib/dns/tests/testdata/rdata_srv_fromWire
@@ -0,0 +1,36 @@
+#
+# various kinds of SRV RDATA stored in an input buffer
+#
+# RDLENGTH=21 bytes
+# 0  1
+ 00 15
+# 2  3  4  5  6  7  8  9 10  1  2  3  4  5  6  7  8  9 20  1  2(bytes)
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# short length
+# 3  4
+ 00 12
+# 5  6  7  8  9 30  1  2  3  4  5  6  7  8  9 40  1  2  3  4  5
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+# length too long
+# 6  7
+ 00 19
+#
+# 8  9 50  1  2  3  4  5  6  7  8  9 60  1  2  3  4  5  6  7  8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63 6f 6d 00
+#
+#
+# incomplete target name
+# 9 70
+ 00 12
+# 1  2  3  4  5  6  7  8  9 80  1  2  3  4  5  6  7  8
+ 00 01 00 05 05 dc 01 61 07 65 78 61 6d 70 6c 65 03 63
+#
+#
+# Valid compressed target name: 'a' + pointer
+# 9 90
+ 00 0a
+#
+# 1  2  3  4  5  6  7  8  9 100
+ 00 01 00 05 05 dc 01 61 c0 0a
diff --git a/src/lib/dns/tsigkey.h b/src/lib/dns/tsigkey.h
index 31211d1..6081dd3 100644
--- a/src/lib/dns/tsigkey.h
+++ b/src/lib/dns/tsigkey.h
@@ -113,10 +113,10 @@ public:
     /// \brief Constructor from an input string
     ///
     /// The string must be of the form:
-    /// <name>:<secret>[:<algorithm>]
-    /// Where <name> is a domain name for the key, <secret> is a
+    /// name:secret[:algorithm]
+    /// Where "name" is a domain name for the key, "secret" is a
     /// base64 representation of the key secret, and the optional
-    /// algorithm is an algorithm identifier as specified in RFC4635.
+    /// "algorithm" is an algorithm identifier as specified in RFC 4635.
     /// The default algorithm is hmac-md5.sig-alg.reg.int.
     ///
     /// The same restriction about the algorithm name (and secret) as that
@@ -188,11 +188,10 @@ public:
     ///
     /// The resulting string will be of the form
     /// name:secret:algorithm
-    /// Where <name> is a domain name for the key, <secret> is a
-    /// base64 representation of the key secret, and algorithm is
-    /// an algorithm identifier as specified in RFC4635
+    /// Where "name" is a domain name for the key, "secret" is a
+    /// base64 representation of the key secret, and "algorithm" is
+    /// an algorithm identifier as specified in RFC 4635.
     ///
-    /// \param key the TSIG key to convert
     /// \return The string representation of the given TSIGKey.
     std::string toText() const;
 
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index d0f1d74..b68f3c4 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -126,6 +126,17 @@ public:
         isc::Exception(file, line, what) {}
 };
 
+/// \brief A generic exception that is thrown if a function is called
+/// in a prohibited way.
+///
+/// For example, this can happen if a class method is called when the object's
+/// state does not allow that particular method.
+class InvalidOperation : public Exception {
+public:
+    InvalidOperation(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
 ///
 /// \brief A generic exception that is thrown when an unexpected
 /// error condition occurs.
@@ -137,6 +148,18 @@ public:
 };
 
 ///
+/// \brief A generic exception that is thrown when a function is
+/// not implemented.
+///
+/// This may be due to unfinished implementation or in case the
+/// function isn't even planned to be provided for that situation.
+class NotImplemented : public Exception {
+public:
+    NotImplemented(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
+///
 /// A shortcut macro to insert known values into exception arguments.
 ///
 /// It allows the \c stream argument to be part of a statement using an
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 9f52724..957d350 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -9,6 +9,7 @@ lib_LTLIBRARIES = liblog.la
 liblog_la_SOURCES  =
 liblog_la_SOURCES += dummylog.h dummylog.cc
 liblog_la_SOURCES += logimpl_messages.cc logimpl_messages.h
+liblog_la_SOURCES += log_dbglevels.h
 liblog_la_SOURCES += log_formatter.h log_formatter.cc
 liblog_la_SOURCES += logger.cc logger.h
 liblog_la_SOURCES += logger_impl.cc logger_impl.h
@@ -21,8 +22,8 @@ liblog_la_SOURCES += logger_name.cc logger_name.h
 liblog_la_SOURCES += logger_specification.h
 liblog_la_SOURCES += logger_support.cc logger_support.h
 liblog_la_SOURCES += logger_unittest_support.cc logger_unittest_support.h
-liblog_la_SOURCES += macros.h
 liblog_la_SOURCES += log_messages.cc log_messages.h
+liblog_la_SOURCES += macros.h
 liblog_la_SOURCES += message_dictionary.cc message_dictionary.h
 liblog_la_SOURCES += message_exception.h
 liblog_la_SOURCES += message_initializer.cc message_initializer.h
diff --git a/src/lib/log/README b/src/lib/log/README
index 3747cb1..3693abb 100644
--- a/src/lib/log/README
+++ b/src/lib/log/README
@@ -477,6 +477,11 @@ the severity system:
 When a particular severity is set, it - and all severities and/or debug
 levels above it - will be logged.
 
+To try to ensure that the information from different modules is roughly
+comparable for the same debug level, a set of standard debug levels has
+been defined for common types of debug output.  However, modules are free
+to set their own debug levels or define additional ones.
+
 Logging Sources v Logging Severities
 ------------------------------------
 When logging events, make a distinction between events related to the
diff --git a/src/lib/log/log_dbglevels.h b/src/lib/log/log_dbglevels.h
new file mode 100644
index 0000000..d713714
--- /dev/null
+++ b/src/lib/log/log_dbglevels.h
@@ -0,0 +1,93 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __LOG_DBGLVLS_H
+#define __LOG_DBGLVLS_H
+
+/// \file
+///
+/// When a message is logged with DEBUG severity, the debug level associated
+/// with the message is also specified.  This debug level is a number
+/// ranging from 0 to 99; the idea is that the higher the debug level, the
+/// more detailed the message.
+///
+/// If debug messages are being logged, the logging system allows them to be
+/// filtered by debug level - only messages logged with a level equal to or
+/// less than the set debug level will be output.  (For example, if the
+/// filter is set to 30, only debug messages logged with levels in the range
+/// 0 to 30 will be output; messages logged with levels 31 to 99 will be
+/// suppressed.)
+///
+/// Levels of 30 or below are reserved for debug messages that are most
+/// likely to be useful for an administrator. Levels 31 to 99 are for use by
+/// someone familiar with the code. "Useful for an administrator" is,
+/// admittedly, a subjective term: it is loosely defined as messages helping
+/// someone diagnose a problem that they could solve without needing to dive
+/// into the code.  So it covers things like start-up steps and configuration
+/// messages.
+///
+/// In practice, this means that levels of 30 and below are most-likely to
+/// be used by the top-level programs, and 31 and above by the various
+/// libraries.
+///
+/// This file defines a set of standard debug levels for use across all loggers.
+/// In this way users can have some expectation of what will be output when
+/// enabling debugging.  Symbols are prefixed DBGLVL so as not to clash with
+/// DBG_ symbols in the various modules.
+///
+/// \note If the names of debug constants are changed, or if ones are added or
+/// removed, edit the file src/lib/python/isc/log/log.cc to update the log
+/// level definitions available to Python.  The change does not need to be
+/// made if only the numeric values of constants are updated.
+
+namespace {
+
+/// Process startup/shutdown debug messages.  Note that these are _debug_
+/// messages, as other messages related to startup and shutdown may be output
+/// with another severity.  For example, when the authoritative server starts
+/// up, the "server started" message could be output at a severity of INFO.
+/// "Server starting" and messages indicating the stages in startup should be
+/// debug messages output at this severity.
+///
+/// This is given a value of 0 as that is the level selected if debugging is
+/// enabled without giving a level.
+const int DBGLVL_START_SHUT = 0;
+
+/// This debug level is reserved for logging the exchange of messages/commands
+/// between processes, including configuration messages.
+const int DBGLVL_COMMAND = 10;
+
+/// If the commands have associated data, this level is when they are printed.
+/// This includes configuration messages.
+const int DBGLVL_COMMAND_DATA = 20;
+
+// The following constants are suggested values for common operations.
+// Depending on the exact nature of the code, modules may or may not use these
+// levels.
+
+/// Trace basic operations.
+const int DBGLVL_TRACE_BASIC = 40;
+
+/// Trace data associated with the basic operations.
+const int DBGLVL_TRACE_BASIC_DATA = 45;
+
+/// Trace detailed operations.
+const int DBGLVL_TRACE_DETAIL = 50;
+
+/// Trace data associated with detailed operations.
+const int DBGLVL_TRACE_DETAIL_DATA = 55;
+
+}   // Anonymous namespace
+
+#endif // __LOG_DBGLVLS_H
diff --git a/src/lib/log/log_formatter.h b/src/lib/log/log_formatter.h
index ca23844..7a9e5fa 100644
--- a/src/lib/log/log_formatter.h
+++ b/src/lib/log/log_formatter.h
@@ -169,7 +169,7 @@ public:
     /// Deactivates the current formatter. In case the formatter is not active,
     /// only produces another inactive formatter.
     ///
-    /// \param arg The argument to place into the placeholder.
+    /// \param value The argument to place into the placeholder.
     template<class Arg> Formatter& arg(const Arg& value) {
         if (logger_) {
             try {
diff --git a/src/lib/log/logger_level_impl.h b/src/lib/log/logger_level_impl.h
index 9289a1d..c990796 100644
--- a/src/lib/log/logger_level_impl.h
+++ b/src/lib/log/logger_level_impl.h
@@ -83,7 +83,7 @@ public:
     /// The log4cplus log level may be non-standard in which case it is
     /// encoding a BIND 10 debug level as well.
     ///
-    /// \param level log4cplus log level
+    /// \param loglevel log4cplus log level
     ///
     /// \return Equivalent BIND 10 severity and debug level
     static
diff --git a/src/lib/log/logger_manager_impl.h b/src/lib/log/logger_manager_impl.h
index aa596a0..f99f832 100644
--- a/src/lib/log/logger_manager_impl.h
+++ b/src/lib/log/logger_manager_impl.h
@@ -59,8 +59,6 @@ public:
     /// This resets the hierarchy of loggers back to their defaults.  This means
     /// that all non-root loggers (if they exist) are set to NOT_SET, and the
     /// root logger reset to logging informational messages.
-    ///
-    /// \param root_name BIND 10 name of the root logger
     static void processInit();
 
     /// \brief Process Specification
diff --git a/src/lib/log/logger_specification.h b/src/lib/log/logger_specification.h
index 35c879c..6805fdd 100644
--- a/src/lib/log/logger_specification.h
+++ b/src/lib/log/logger_specification.h
@@ -103,7 +103,7 @@ public:
 
     /// \brief Add output option.
     ///
-    /// \param Option to add to the list.
+    /// \param option Option to add to the list.
     void addOutputOption(const OutputOption& option) {
         options_.push_back(option);
     }
diff --git a/src/lib/log/macros.h b/src/lib/log/macros.h
index 3128131..42fb42e 100644
--- a/src/lib/log/macros.h
+++ b/src/lib/log/macros.h
@@ -16,6 +16,7 @@
 #define __LOG_MACROS_H
 
 #include <log/logger.h>
+#include <log/log_dbglevels.h>
 
 /// \brief Macro to conveniently test debug output and log it
 #define LOG_DEBUG(LOGGER, LEVEL, MESSAGE) \
diff --git a/src/lib/log/message_dictionary.h b/src/lib/log/message_dictionary.h
index 23f76d7..519986d 100644
--- a/src/lib/log/message_dictionary.h
+++ b/src/lib/log/message_dictionary.h
@@ -79,7 +79,7 @@ public:
     ///
     /// \return true if the message was added to the dictionary, false if the
     /// message existed and it was not added.
-    virtual bool add (const std::string& ident, const std::string& test);
+    virtual bool add (const std::string& ident, const std::string& text);
 
 
     /// \brief Replace Message
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index 069a7b4..a5f793c 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -45,7 +45,7 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 endif
 
-check_PROGRAMS = logger_example
+noinst_PROGRAMS = logger_example
 logger_example_SOURCES = logger_example.cc
 logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
@@ -53,7 +53,7 @@ logger_example_LDADD  = $(top_builddir)/src/lib/log/liblog.la
 logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
 logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
-check_PROGRAMS += init_logger_test
+noinst_PROGRAMS += init_logger_test
 init_logger_test_SOURCES = init_logger_test.cc
 init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
@@ -61,7 +61,7 @@ init_logger_test_LDADD  = $(top_builddir)/src/lib/log/liblog.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
-noinst_PROGRAMS = $(TESTS)
+noinst_PROGRAMS += $(TESTS)
 
 # Additional test using the shell.  These are principally tests
 # where the global logging environment is affected, and where the
diff --git a/src/lib/nsas/nameserver_address_store.h b/src/lib/nsas/nameserver_address_store.h
index 87845c9..1af535a 100644
--- a/src/lib/nsas/nameserver_address_store.h
+++ b/src/lib/nsas/nameserver_address_store.h
@@ -92,7 +92,10 @@ public:
 
     /// \brief cancel the given lookup action
     ///
-    /// \param callback Callback object that would be called
+    /// \param zone Name of zone.
+    /// \param class_code Class of the zone.
+    /// \param callback Callback object that would be called.
+    /// \param family Address family for which lookup is being cancelled.
     void cancel(const std::string& zone, const dns::RRClass& class_code,
                 const boost::shared_ptr<AddressRequestCallback>& callback,
                 AddressFamily family = ANY_OK);
diff --git a/src/lib/nsas/nsas_log.h b/src/lib/nsas/nsas_log.h
index ec6844f..031f46d 100644
--- a/src/lib/nsas/nsas_log.h
+++ b/src/lib/nsas/nsas_log.h
@@ -29,15 +29,15 @@ namespace nsas {
 // The first level traces normal operations - asking the NSAS for an address,
 // and cancelling a lookup.  It also records when the NSAS calls back to the
 // resolver to resolve something.
-const int NSAS_DBG_TRACE = 10;
+const int NSAS_DBG_TRACE = DBGLVL_TRACE_BASIC;
 
 // The next level extends the normal operations and records the results of the
 // lookups.
-const int NSAS_DBG_RESULTS = 20;
+const int NSAS_DBG_RESULTS = DBGLVL_TRACE_BASIC_DATA;
 
 // Additional information on the usage of the names - the RTT values obtained
 // when queries were done.
-const int NSAS_DBG_RTT = 30;
+const int NSAS_DBG_RTT = DBGLVL_TRACE_DETAIL_DATA;
 
 
 /// \brief NSAS Logger
diff --git a/src/lib/nsas/zone_entry.h b/src/lib/nsas/zone_entry.h
index f772784..482b89f 100644
--- a/src/lib/nsas/zone_entry.h
+++ b/src/lib/nsas/zone_entry.h
@@ -66,7 +66,7 @@ public:
      *     different objects.
      * \param nameserver_table Hashtable of NameServerEntry objects for
      *     this zone
-     * \param namesever_lru LRU for the nameserver entries
+     * \param nameserver_lru LRU for the nameserver entries
      * \todo Move to cc file, include the lookup (if NSAS uses resolver for
      *     everything)
      */
diff --git a/src/lib/python/Makefile.am b/src/lib/python/Makefile.am
index 5924294..893bb8c 100644
--- a/src/lib/python/Makefile.am
+++ b/src/lib/python/Makefile.am
@@ -1,15 +1,8 @@
 SUBDIRS = isc
 
-python_PYTHON =	bind10_config.py
+nodist_python_PYTHON =	bind10_config.py
 pythondir = $(pyexecdir)
 
-# Explicitly define DIST_COMMON so ${python_PYTHON} is not included
-# as we don't want the generated file included in distributed tarfile.
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in bind10_config.py.in
-
-# When setting DIST_COMMON, then need to add the .in file too.
-EXTRA_DIST =  bind10_config.py.in
-
 CLEANFILES = bind10_config.pyc
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/bind10_config.py.in b/src/lib/python/bind10_config.py.in
index 69b17ed..e54b1a8 100644
--- a/src/lib/python/bind10_config.py.in
+++ b/src/lib/python/bind10_config.py.in
@@ -23,6 +23,10 @@ def reload():
     global DATA_PATH
     global PLUGIN_PATHS
     global PREFIX
+    global LIBEXECDIR
+    LIBEXECDIR = ("@libexecdir@/@PACKAGE@"). \
+        replace("${exec_prefix}", "@exec_prefix@"). \
+        replace("${prefix}", "@prefix@")
     BIND10_MSGQ_SOCKET_FILE = os.path.join("@localstatedir@",
                                            "@PACKAGE_NAME@",
                                            "msgq_socket").replace("${prefix}",
diff --git a/src/lib/python/isc/Makefile.am b/src/lib/python/isc/Makefile.am
index b391c1e..a3e74c5 100644
--- a/src/lib/python/isc/Makefile.am
+++ b/src/lib/python/isc/Makefile.am
@@ -1,4 +1,5 @@
-SUBDIRS = datasrc cc config log net notify util testutils acl
+SUBDIRS = datasrc cc config dns log net notify util testutils acl bind10
+SUBDIRS += xfrin log_messages
 
 python_PYTHON = __init__.py
 
diff --git a/src/lib/python/isc/__init__.py b/src/lib/python/isc/__init__.py
index 8fcbf42..029f110 100644
--- a/src/lib/python/isc/__init__.py
+++ b/src/lib/python/isc/__init__.py
@@ -1,4 +1,7 @@
-import isc.datasrc
+# On some systems, it appears the dynamic linker gets
+# confused if the order is not right here
+# There is probably a solution for this, but for now:
+# order is important here!
 import isc.cc
 import isc.config
-#import isc.dns
+import isc.datasrc
diff --git a/src/lib/python/isc/acl/Makefile.am b/src/lib/python/isc/acl/Makefile.am
index cabc0a3..b1afa15 100644
--- a/src/lib/python/isc/acl/Makefile.am
+++ b/src/lib/python/isc/acl/Makefile.am
@@ -4,10 +4,10 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-python_PYTHON = __init__.py
+python_PYTHON = __init__.py dns.py
 pythondir = $(PYTHON_SITEPKG_DIR)/isc/acl
 
-pyexec_LTLIBRARIES = acl.la dns.la
+pyexec_LTLIBRARIES = acl.la _dns.la
 pyexecdir = $(PYTHON_SITEPKG_DIR)/isc/acl
 
 acl_la_SOURCES = acl.cc
@@ -15,14 +15,14 @@ acl_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 acl_la_LDFLAGS = $(PYTHON_LDFLAGS)
 acl_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 
-dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
-dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
-dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
-dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
-dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
+_dns_la_SOURCES = dns.h dns.cc dns_requestacl_python.h dns_requestacl_python.cc
+_dns_la_SOURCES += dns_requestcontext_python.h dns_requestcontext_python.cc
+_dns_la_SOURCES += dns_requestloader_python.h dns_requestloader_python.cc
+_dns_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+_dns_la_LDFLAGS = $(PYTHON_LDFLAGS)
 # Note: PYTHON_CXXFLAGS may have some -Wno... workaround, which must be
 # placed after -Wextra defined in AM_CXXFLAGS
-dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+_dns_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 
 # Python prefers .so, while some OSes (specifically MacOS) use a different
 # suffix for dynamic objects.  -module is necessary to work this around.
@@ -30,11 +30,11 @@ acl_la_LDFLAGS += -module
 acl_la_LIBADD = $(top_builddir)/src/lib/acl/libacl.la
 acl_la_LIBADD += $(PYTHON_LIB)
 
-dns_la_LDFLAGS += -module
-dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
-dns_la_LIBADD += $(PYTHON_LIB)
+_dns_la_LDFLAGS += -module
+_dns_la_LIBADD = $(top_builddir)/src/lib/acl/libdnsacl.la
+_dns_la_LIBADD += $(PYTHON_LIB)
 
-EXTRA_DIST = acl.py dns.py
+EXTRA_DIST = acl.py _dns.py
 EXTRA_DIST += acl_inc.cc
 EXTRA_DIST += dnsacl_inc.cc dns_requestacl_inc.cc dns_requestcontext_inc.cc
 EXTRA_DIST += dns_requestloader_inc.cc
diff --git a/src/lib/python/isc/acl/_dns.py b/src/lib/python/isc/acl/_dns.py
new file mode 100644
index 0000000..a645a7b
--- /dev/null
+++ b/src/lib/python/isc/acl/_dns.py
@@ -0,0 +1,29 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This file is not installed; The .so version will be installed into the right
+# place at installation time.
+# This helper script is only to find it in the .libs directory when we run
+# as a test or from the build directory.
+
+import os
+import sys
+
+for base in sys.path[:]:
+    bindingdir = os.path.join(base, 'isc/acl/.libs')
+    if os.path.exists(bindingdir):
+        sys.path.insert(0, bindingdir)
+
+from _dns import *
diff --git a/src/lib/python/isc/acl/dns.cc b/src/lib/python/isc/acl/dns.cc
index 351a8b3..eb3b57b 100644
--- a/src/lib/python/isc/acl/dns.cc
+++ b/src/lib/python/isc/acl/dns.cc
@@ -52,7 +52,7 @@ PyMethodDef methods[] = {
 
 PyModuleDef dnsacl = {
     { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
-    "isc.acl.dns",
+    "isc.acl._dns",
     dnsacl_doc,
     -1,
     methods,
@@ -90,7 +90,7 @@ getACLException(const char* ex_name) {
 }
 
 PyMODINIT_FUNC
-PyInit_dns(void) {
+PyInit__dns(void) {
     PyObject* mod = PyModule_Create(&dnsacl);
     if (mod == NULL) {
         return (NULL);
diff --git a/src/lib/python/isc/acl/dns.py b/src/lib/python/isc/acl/dns.py
index 8070559..0733bc3 100644
--- a/src/lib/python/isc/acl/dns.py
+++ b/src/lib/python/isc/acl/dns.py
@@ -13,21 +13,61 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-# This file is not installed. The log.so is installed into the right place.
-# It is only to find it in the .libs directory when we run as a test or
-# from the build directory.
-# But as nobody gives us the builddir explicitly (and we can't use generation
-# from .in file, as it would put us into the builddir and we wouldn't be found)
-# we guess from current directory. Any idea for something better? This should
-# be enough for the tests, but would it work for B10_FROM_SOURCE as well?
-# Should we look there? Or define something in bind10_config?
-
-import os
-import sys
-
-for base in sys.path[:]:
-    bindingdir = os.path.join(base, 'isc/acl/.libs')
-    if os.path.exists(bindingdir):
-        sys.path.insert(0, bindingdir)
-
-from dns import *
+"""\
+This module provides Python bindings for the C++ classes in the
+isc::acl::dns namespace.  Specifically, it defines Python interfaces of
+handling access control lists (ACLs) with DNS related contexts.
+The actual binding is implemented in an effectively hidden module,
+isc.acl._dns; this frontend module is in terms of implementation so that
+the C++ binding code doesn't have to deal with complicated operations
+that could be done in a more straightforward way in native Python.
+
+For further details of the actual module, see the documentation of the
+_dns module.
+"""
+
+import pydnspp
+
+import isc.acl._dns
+from isc.acl._dns import *
+
+class RequestACL(isc.acl._dns.RequestACL):
+    """A straightforward wrapper subclass of isc.acl._dns.RequestACL.
+
+    See the base class documentation for more implementation.
+    """
+    pass
+
+class RequestLoader(isc.acl._dns.RequestLoader):
+    """A straightforward wrapper subclass of isc.acl._dns.RequestLoader.
+
+    See the base class documentation for more implementation.
+    """
+    pass
+
+class RequestContext(isc.acl._dns.RequestContext):
+    """A straightforward wrapper subclass of isc.acl._dns.RequestContext.
+
+    See the base class documentation for more implementation.
+    """
+
+    def __init__(self, remote_address, tsig=None):
+        """Wrapper for the RequestContext constructor.
+
+        Internal implementation details that the users don't have to
+        worry about: To avoid dealing with pydnspp bindings in the C++ code,
+        this wrapper converts the TSIG record in its wire format in the form
+        of byte data, and has the binding re-construct the record from it.
+        """
+        tsig_wire = b''
+        if tsig is not None:
+            if not isinstance(tsig, pydnspp.TSIGRecord):
+                raise TypeError("tsig must be a TSIGRecord, not %s" %
+                                tsig.__class__.__name__)
+            tsig_wire = tsig.to_wire(tsig_wire)
+        isc.acl._dns.RequestContext.__init__(self, remote_address, tsig_wire)
+
+    def __str__(self):
+        """Wrap __str__() to convert the module name."""
+        s = isc.acl._dns.RequestContext.__str__(self)
+        return s.replace('<isc.acl._dns', '<isc.acl.dns')
diff --git a/src/lib/python/isc/acl/dns_requestacl_python.cc b/src/lib/python/isc/acl/dns_requestacl_python.cc
index 5e5acea..1c38a30 100644
--- a/src/lib/python/isc/acl/dns_requestacl_python.cc
+++ b/src/lib/python/isc/acl/dns_requestacl_python.cc
@@ -114,7 +114,7 @@ namespace python {
 // Most of the functions are not actually implemented and NULL here.
 PyTypeObject requestacl_type = {
     PyVarObject_HEAD_INIT(NULL, 0)
-    "isc.acl.dns.RequestACL",
+    "isc.acl._dns.RequestACL",
     sizeof(s_RequestACL),                 // tp_basicsize
     0,                                  // tp_itemsize
     RequestACL_destroy,                // tp_dealloc
@@ -132,7 +132,7 @@ PyTypeObject requestacl_type = {
     NULL,                               // tp_getattro
     NULL,                               // tp_setattro
     NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
     RequestACL_doc,
     NULL,                               // tp_traverse
     NULL,                               // tp_clear
diff --git a/src/lib/python/isc/acl/dns_requestcontext_inc.cc b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
index 9e80e1f..f71bc59 100644
--- a/src/lib/python/isc/acl/dns_requestcontext_inc.cc
+++ b/src/lib/python/isc/acl/dns_requestcontext_inc.cc
@@ -5,18 +5,18 @@ DNS request to be checked.\n\
 This plays the role of ACL context for the RequestACL object.\n\
 \n\
 Based on the minimalist philosophy, the initial implementation only\n\
-maintains the remote (source) IP address of the request. The plan is\n\
-to add more parameters of the request. A scheduled next step is to\n\
-support the TSIG key (if it's included in the request). Other\n\
-possibilities are the local (destination) IP address, the remote and\n\
-local port numbers, various fields of the DNS request (e.g. a\n\
-particular header flag value).\n\
+maintains the remote (source) IP address of the request and\n\
+(optionally) the TSIG record included in the request. We may add more\n\
+parameters of the request as we see the need for them. Possible\n\
+additional parameters are the local (destination) IP address, the\n\
+remote and local port numbers, various fields of the DNS request (e.g.\n\
+a particular header flag value).\n\
 \n\
-RequestContext(remote_address)\n\
+RequestContext(remote_address, tsig)\n\
 \n\
     In this initial implementation, the constructor only takes a\n\
     remote IP address in the form of a socket address as used in the\n\
-    Python socket module.\n\
+    Python socket module, and optionally a pydnspp.TSIGRecord object.\n\
 \n\
     Exceptions:\n\
       isc.acl.ACLError Normally shouldn't happen, but still possible\n\
@@ -25,6 +25,9 @@ RequestContext(remote_address)\n\
 \n\
     Parameters:\n\
       remote_address The remote IP address\n\
+      tsig   The TSIG record included in the request message, if any.\n\
+             If the request doesn't include a TSIG, this will be None.\n\
+             If this parameter is omitted None will be assumed.\n\
 \n\
 ";
 } // unnamed namespace
diff --git a/src/lib/python/isc/acl/dns_requestcontext_python.cc b/src/lib/python/isc/acl/dns_requestcontext_python.cc
index 6c63b59..7f33f59 100644
--- a/src/lib/python/isc/acl/dns_requestcontext_python.cc
+++ b/src/lib/python/isc/acl/dns_requestcontext_python.cc
@@ -14,7 +14,7 @@
 
 // Enable this if you use s# variants with PyArg_ParseTuple(), see
 // http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
-//#define PY_SSIZE_T_CLEAN
+#define PY_SSIZE_T_CLEAN
 
 // Python.h needs to be placed at the head of the program file, see:
 // http://docs.python.org/py3k/extending/extending.html#a-simple-example
@@ -37,8 +37,16 @@
 
 #include <exceptions/exceptions.h>
 
+#include <util/buffer.h>
 #include <util/python/pycppwrapper_util.h>
 
+#include <dns/name.h>
+#include <dns/rrclass.h>
+#include <dns/rrtype.h>
+#include <dns/rrttl.h>
+#include <dns/rdata.h>
+#include <dns/tsigrecord.h>
+
 #include <acl/dns.h>
 #include <acl/ip_check.h>
 
@@ -49,6 +57,8 @@ using namespace std;
 using boost::scoped_ptr;
 using boost::lexical_cast;
 using namespace isc;
+using namespace isc::dns;
+using namespace isc::dns::rdata;
 using namespace isc::util::python;
 using namespace isc::acl::dns;
 using namespace isc::acl::dns::python;
@@ -59,11 +69,39 @@ namespace dns {
 namespace python {
 
 struct s_RequestContext::Data {
-    // The constructor.  Currently it only accepts the information of the
-    // request source address, and contains all necessary logic in the body
-    // of the constructor.  As it's extended we may have refactor it by
-    // introducing helper methods.
-    Data(const char* const remote_addr, const unsigned short remote_port) {
+    // The constructor.
+    Data(const char* const remote_addr, const unsigned short remote_port,
+         const char* tsig_data, const Py_ssize_t tsig_len)
+    {
+        createRemoteAddr(remote_addr, remote_port);
+        createTSIGRecord(tsig_data, tsig_len);
+    }
+
+    // A convenient type converter from sockaddr_storage to sockaddr
+    const struct sockaddr& getRemoteSockaddr() const {
+        const void* p = &remote_ss;
+        return (*static_cast<const struct sockaddr*>(p));
+    }
+
+    // The remote (source) IP address of the request.  Note that it needs
+    // a reference to remote_ss.  That's why the latter is stored within
+    // this structure.
+    scoped_ptr<IPAddress> remote_ipaddr;
+
+    // The effective length of remote_ss.  It's necessary for getnameinfo()
+    // called from sockaddrToText (__str__ backend).
+    socklen_t remote_salen;
+
+    // The TSIG record included in the request, if any.  If the request
+    // doesn't contain a TSIG, this will be NULL.
+    scoped_ptr<TSIGRecord> tsig_record;
+
+private:
+    // A helper method for the constructor that is responsible for constructing
+    // the remote address.
+    void createRemoteAddr(const char* const remote_addr,
+                          const unsigned short remote_port)
+    {
         struct addrinfo hints, *res;
         memset(&hints, 0, sizeof(hints));
         hints.ai_family = AF_UNSPEC;
@@ -85,20 +123,31 @@ struct s_RequestContext::Data {
         remote_ipaddr.reset(new IPAddress(getRemoteSockaddr()));
     }
 
-    // A convenient type converter from sockaddr_storage to sockaddr
-    const struct sockaddr& getRemoteSockaddr() const {
-        const void* p = &remote_ss;
-        return (*static_cast<const struct sockaddr*>(p));
-    }
-
-    // The remote (source) IP address the request.  Note that it needs
-    // a reference to remote_ss.  That's why the latter is stored within
-    // this structure.
-    scoped_ptr<IPAddress> remote_ipaddr;
+    // A helper method for the constructor that is responsible for constructing
+    // the request TSIG.
+    void createTSIGRecord(const char* tsig_data, const Py_ssize_t tsig_len) {
+        if (tsig_len == 0) {
+            return;
+        }
 
-    // The effective length of remote_ss.  It's necessary for getnameinf()
-    // called from sockaddrToText (__str__ backend).
-    socklen_t remote_salen;
+        // Re-construct the TSIG record from the passed binary.  This should
+        // normally succeed because we are generally expected to be called
+        // from the frontend .py, which converts a valid TSIGRecord in its
+        // wire format.  If some evil or buggy python program directly calls
+        // us with bogus data, validation in libdns++ will trigger an
+        // exception, which will be caught and converted to a Python exception
+        // in RequestContext_init().
+        isc::util::InputBuffer b(tsig_data, tsig_len);
+        const Name key_name(b);
+        const RRType tsig_type(b.readUint16());
+        const RRClass tsig_class(b.readUint16());
+        const RRTTL ttl(b.readUint32());
+        const size_t rdlen(b.readUint16());
+        const ConstRdataPtr rdata = createRdata(tsig_type, tsig_class, b,
+                                                rdlen);
+        tsig_record.reset(new TSIGRecord(key_name, tsig_class, ttl,
+                                         *rdata, 0));
+    }
 
 private:
     struct sockaddr_storage remote_ss;
@@ -145,31 +194,41 @@ RequestContext_init(PyObject* po_self, PyObject* args, PyObject*) {
     s_RequestContext* const self = static_cast<s_RequestContext*>(po_self);
 
     try {
-        // In this initial implementation, the constructor is simply: It
-        // takes a single parameter, which should be a Python socket address
-        // object.  For IPv4, it's ('address test', numeric_port); for IPv6,
+        // In this initial implementation, the constructor is simple: It
+        // takes two parameters.  The first parameter should be a Python
+        // socket address object.
+        // For IPv4, it's ('address text', numeric_port); for IPv6,
         // it's ('address text', num_port, num_flowid, num_zoneid).
+        // The second parameter is wire-format TSIG record in the form of
+        // Python byte data.  If the TSIG isn't included in the request,
+        // its length will be 0.
         // Below, we parse the argument in the most straightforward way.
         // As the constructor becomes more complicated, we should probably
         // make it more structural (for example, we should first retrieve
-        // the socket address as a PyObject, and parse it recursively)
+        // the python objects, and parse them recursively)
 
         const char* remote_addr;
         unsigned short remote_port;
         unsigned int remote_flowinfo; // IPv6 only, unused here
         unsigned int remote_zoneid; // IPv6 only, unused here
-
-        if (PyArg_ParseTuple(args, "(sH)", &remote_addr, &remote_port) ||
-            PyArg_ParseTuple(args, "(sHII)", &remote_addr, &remote_port,
-                             &remote_flowinfo, &remote_zoneid))
+        const char* tsig_data;
+        Py_ssize_t tsig_len;
+
+        if (PyArg_ParseTuple(args, "(sH)y#", &remote_addr, &remote_port,
+                             &tsig_data, &tsig_len) ||
+            PyArg_ParseTuple(args, "(sHII)y#", &remote_addr, &remote_port,
+                             &remote_flowinfo, &remote_zoneid,
+                             &tsig_data, &tsig_len))
         {
-            // We need to clear the error in case the first call to PareTuple
+            // We need to clear the error in case the first call to ParseTuple
             // fails.
             PyErr_Clear();
 
             auto_ptr<s_RequestContext::Data> dataptr(
-                new s_RequestContext::Data(remote_addr, remote_port));
-            self->cppobj = new RequestContext(*dataptr->remote_ipaddr);
+                new s_RequestContext::Data(remote_addr, remote_port,
+                                           tsig_data, tsig_len));
+            self->cppobj = new RequestContext(*dataptr->remote_ipaddr,
+                                              dataptr->tsig_record.get());
             self->data_ = dataptr.release();
             return (0);
         }
@@ -224,7 +283,11 @@ RequestContext_str(PyObject* po_self) {
         objss << "<" << requestcontext_type.tp_name << " object, "
               << "remote_addr="
               << sockaddrToText(self->data_->getRemoteSockaddr(),
-                                self->data_->remote_salen) << ">";
+                                self->data_->remote_salen);
+        if (self->data_->tsig_record) {
+            objss << ", key=" << self->data_->tsig_record->getName();
+        }
+        objss << ">";
         return (Py_BuildValue("s", objss.str().c_str()));
     } catch (const exception& ex) {
         const string ex_what =
@@ -248,7 +311,7 @@ namespace python {
 // Most of the functions are not actually implemented and NULL here.
 PyTypeObject requestcontext_type = {
     PyVarObject_HEAD_INIT(NULL, 0)
-    "isc.acl.dns.RequestContext",
+    "isc.acl._dns.RequestContext",
     sizeof(s_RequestContext),                 // tp_basicsize
     0,                                  // tp_itemsize
     RequestContext_destroy,             // tp_dealloc
@@ -266,7 +329,7 @@ PyTypeObject requestcontext_type = {
     NULL,                               // tp_getattro
     NULL,                               // tp_setattro
     NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
     RequestContext_doc,
     NULL,                               // tp_traverse
     NULL,                               // tp_clear
diff --git a/src/lib/python/isc/acl/dns_requestloader_python.cc b/src/lib/python/isc/acl/dns_requestloader_python.cc
index 1ddff4c..ab421c5 100644
--- a/src/lib/python/isc/acl/dns_requestloader_python.cc
+++ b/src/lib/python/isc/acl/dns_requestloader_python.cc
@@ -171,7 +171,7 @@ namespace python {
 // Most of the functions are not actually implemented and NULL here.
 PyTypeObject requestloader_type = {
     PyVarObject_HEAD_INIT(NULL, 0)
-    "isc.acl.dns.RequestLoader",
+    "isc.acl._dns.RequestLoader",
     sizeof(s_RequestLoader),                 // tp_basicsize
     0,                                  // tp_itemsize
     RequestLoader_destroy,       // tp_dealloc
@@ -189,7 +189,7 @@ PyTypeObject requestloader_type = {
     NULL,                               // tp_getattro
     NULL,                               // tp_setattro
     NULL,                               // tp_as_buffer
-    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, // tp_flags
     RequestLoader_doc,
     NULL,                               // tp_traverse
     NULL,                               // tp_clear
diff --git a/src/lib/python/isc/acl/tests/Makefile.am b/src/lib/python/isc/acl/tests/Makefile.am
index 64737d2..e0a1895 100644
--- a/src/lib/python/isc/acl/tests/Makefile.am
+++ b/src/lib/python/isc/acl/tests/Makefile.am
@@ -7,7 +7,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,7 +19,7 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_builddir)/src/lib/isc/python/acl/.libs:$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/isc/python/acl/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/acl/tests/dns_test.py b/src/lib/python/isc/acl/tests/dns_test.py
index acaf32b..7ee3023 100644
--- a/src/lib/python/isc/acl/tests/dns_test.py
+++ b/src/lib/python/isc/acl/tests/dns_test.py
@@ -15,6 +15,7 @@
 
 import unittest
 import socket
+from pydnspp import *
 from isc.acl.acl import LoaderError, Error, ACCEPT, REJECT, DROP
 from isc.acl.dns import *
 
@@ -39,12 +40,37 @@ def get_acl_json(prefix):
     json[0]["from"] = prefix
     return REQUEST_LOADER.load(json)
 
-def get_context(address):
+# The following two are similar to the previous two, but use a TSIG key name
+# instead of IP prefix.
+def get_tsig_acl(key):
+    return REQUEST_LOADER.load('[{"action": "ACCEPT", "key": "' + \
+                                   key + '"}]')
+
+def get_tsig_acl_json(key):
+    json = [{"action": "ACCEPT"}]
+    json[0]["key"] = key
+    return REQUEST_LOADER.load(json)
+
+# commonly used TSIG RDATA.  For the purpose of ACL checks only the key name
+# matters; other parrameters are simply borrowed from some other tests, which
+# can be anything for the purpose of the tests here.
+TSIG_RDATA = TSIG("hmac-md5.sig-alg.reg.int. 1302890362 " + \
+                      "300 16 2tra2tra2tra2tra2tra2g== " + \
+                      "11621 0 0")
+
+def get_context(address, key_name=None):
     '''This is a simple shortcut wrapper for creating a RequestContext
-    object with a given IP address.  Port number doesn't matter in the test
-    (as of the initial implementation), so it's fixed for simplicity.
+    object with a given IP address and optionally a TSIG key name.
+    Port number doesn't matter in the test (as of the initial implementation),
+    so it's fixed for simplicity.
+    If key_name is not None, it internally creates a (faked) TSIG record
+    and constructs a context with that key.  Note that only the key name
+    matters for the purpose of ACL checks.
     '''
-    return RequestContext(get_sockaddr(address, 53000))
+    tsig_record = None
+    if key_name is not None:
+        tsig_record = TSIGRecord(Name(key_name), TSIG_RDATA)
+    return RequestContext(get_sockaddr(address, 53000), tsig_record)
 
 # These are commonly used RequestContext object
 CONTEXT4 = get_context('192.0.2.1')
@@ -63,6 +89,21 @@ class RequestContextTest(unittest.TestCase):
                          RequestContext(('2001:db8::1234', 53006,
                                          0, 0)).__str__())
 
+        # Construct the context from IP address and a TSIG record.
+        tsig_record = TSIGRecord(Name("key.example.com"), TSIG_RDATA)
+        self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+                             'remote_addr=[192.0.2.1]:53001, ' + \
+                             'key=key.example.com.>',
+                         RequestContext(('192.0.2.1', 53001),
+                                        tsig_record).__str__())
+
+        # same with IPv6 address, just in case.
+        self.assertEqual('<isc.acl.dns.RequestContext object, ' + \
+                             'remote_addr=[2001:db8::1234]:53006, ' + \
+                             'key=key.example.com.>',
+                         RequestContext(('2001:db8::1234', 53006,
+                                         0, 0), tsig_record).__str__())
+
         # Unusual case: port number overflows (this constructor allows that,
         # although it should be rare anyway; the socket address should
         # normally come from the Python socket module.
@@ -89,7 +130,9 @@ class RequestContextTest(unittest.TestCase):
         # not a tuple
         self.assertRaises(TypeError, RequestContext, 1)
         # invalid number of parameters
-        self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0)
+        self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), 0, 1)
+        # type error for TSIG
+        self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 53), tsig=1)
         # tuple is not in the form of sockaddr
         self.assertRaises(TypeError, RequestContext, (0, 53))
         self.assertRaises(TypeError, RequestContext, ('192.0.2.1', 'http'))
@@ -159,10 +202,22 @@ class RequestACLTest(unittest.TestCase):
         self.assertRaises(LoaderError, REQUEST_LOADER.load,
                           [{"action": "ACCEPT", "from": []}])
         self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          '[{"action": "ACCEPT", "key": 1}]')
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          [{"action": "ACCEPT", "key": 1}])
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          '[{"action": "ACCEPT", "key": {}}]')
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          [{"action": "ACCEPT", "key": {}}])
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
                           '[{"action": "ACCEPT", "from": "bad"}]')
         self.assertRaises(LoaderError, REQUEST_LOADER.load,
                           [{"action": "ACCEPT", "from": "bad"}])
         self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          [{"action": "ACCEPT", "key": "bad..name"}])
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
+                          [{"action": "ACCEPT", "key": "bad..name"}])
+        self.assertRaises(LoaderError, REQUEST_LOADER.load,
                           '[{"action": "ACCEPT", "from": null}]')
         self.assertRaises(LoaderError, REQUEST_LOADER.load,
                           [{"action": "ACCEPT", "from": None}])
@@ -237,6 +292,28 @@ class RequestACLTest(unittest.TestCase):
         self.assertEqual(REJECT, get_acl('32.1.13.184').execute(CONTEXT6))
         self.assertEqual(REJECT, get_acl_json('32.1.13.184').execute(CONTEXT6))
 
+        # TSIG checks, derived from dns_test.cc
+        self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+                             execute(get_context('192.0.2.1',
+                                                 'key.example.com')))
+        self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+                             execute(get_context('192.0.2.1',
+                                                 'badkey.example.com')))
+        self.assertEqual(ACCEPT, get_tsig_acl('key.example.com').\
+                             execute(get_context('2001:db8::1',
+                                                 'key.example.com')))
+        self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+                             execute(get_context('2001:db8::1',
+                                                 'badkey.example.com')))
+        self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+                             execute(CONTEXT4))
+        self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+                             execute(CONTEXT4))
+        self.assertEqual(REJECT, get_tsig_acl('key.example.com').\
+                             execute(CONTEXT6))
+        self.assertEqual(REJECT, get_tsig_acl_json('key.example.com').\
+                             execute(CONTEXT6))
+
         # A bit more complicated example, derived from resolver_config_unittest
         acl = REQUEST_LOADER.load('[ {"action": "ACCEPT", ' +
                                   '     "from": "192.0.2.1"},' +
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
new file mode 100644
index 0000000..c0f1e32
--- /dev/null
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -0,0 +1,4 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py
+pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/__init__.py b/src/lib/python/isc/bind10/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
new file mode 100644
index 0000000..91b7064
--- /dev/null
+++ b/src/lib/python/isc/bind10/component.py
@@ -0,0 +1,647 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Module for managing components (abstraction of process). It allows starting
+them in given order, handling when they crash (what happens depends on kind
+of component) and shutting down. It also handles the configuration of this.
+
+Dependencies between them are not yet handled. It might turn out they are
+needed, in that case they will be added sometime in future.
+
+This framework allows for a single process to be started multiple times (by
+specifying multiple components with the same configuration). However, the rest
+of the system might not handle such situation well, so until it is made so,
+it would be better to start each process at most once.
+"""
+
+import isc.log
+from isc.log_messages.bind10_messages import *
+import time
+
+logger = isc.log.Logger("boss")
+DBG_TRACE_DATA = 20
+DBG_TRACE_DETAILED = 80
+
+START_CMD = 'start'
+STOP_CMD = 'stop'
+
+STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
+
+STATE_DEAD = 'dead'
+STATE_STOPPED = 'stopped'
+STATE_RUNNING = 'running'
+
+class BaseComponent:
+    """
+    This represents a single component. This one is an abstract base class.
+    There are some methods which should be left untouched, but there are
+    others which define the interface only and should be overridden in
+    concrete implementations.
+
+    The component is in one of the three states:
+    - Stopped - it is either not started yet or it was explicitly stopped.
+      The component is created in this state (it must be asked to start
+      explicitly).
+    - Running - after start() was called, it started successfully and is
+      now running.
+    - Dead - it failed and can not be resurrected.
+
+    Init
+      |            stop()
+      |  +-----------------------+
+      |  |                       |
+      v  |  start()  success     |
+    Stopped --------+--------> Running <----------+
+                    |            |                |
+                    |failure     | failed()       |
+                    |            |                |
+                    v            |                |
+                    +<-----------+                |
+                    |                             |
+                    |  kind == dispensable or kind|== needed and failed late
+                    +-----------------------------+
+                    |
+                    | kind == core or kind == needed and it failed too soon
+                    v
+                  Dead
+
+    Note that there are still situations which are not handled properly here.
+    We don't recognize a component that is starting up, but not ready yet, one
+    that is already shutting down, impossible to stop, etc. We need to add more
+    states in future to handle it properly.
+    """
+    def __init__(self, boss, kind):
+        """
+        Creates the component in not running mode.
+
+        The parameters are:
+        - `boss` the boss object to plug into. The component needs to plug
+          into it to know when it failed, etc.
+        - `kind` is the kind of component. It may be one of:
+          * 'core' means the system can't run without it and it can't be
+            safely restarted. If it does not start, the system is brought
+            down. If it crashes, the system is turned off as well (with
+            non-zero exit status).
+          * 'needed' means the system is able to restart the component,
+            but it is vital part of the service (like auth server). If
+            it fails to start or crashes in less than 10s after the first
+            startup, the system is brought down. If it crashes later on,
+            it is restarted (see below).
+          * 'dispensable' means the component should be running, but if it
+            doesn't start or crashes for some reason, the system simply tries
+            to restart it and keeps running.
+
+        For components that are restarted, the restarts are not always
+        immediate; if the component has run for more than
+        COMPONENT_RESTART_DELAY (10) seconds, they are restarted right
+        away. If the component has not run that long, the system waits
+        until that time has passed (since the last start) until the
+        component is restarted.
+
+        Note that the __init__ method of child class should have these
+        parameters:
+
+        __init__(self, process, boss, kind, address=None, params=None)
+
+        The extra parameters are:
+        - `process` - which program should be started.
+        - `address` - the address on message bus, used to talk to the
+           component.
+        - `params` - parameters to the program.
+
+        The methods you should not override are:
+        - start
+        - stop
+        - failed
+        - running
+
+        You should override:
+        - _start_internal
+        - _stop_internal
+        - _failed_internal (if you like, the empty default might be suitable)
+        - name
+        - pid
+        - kill
+        """
+        if kind not in ['core', 'needed', 'dispensable']:
+            raise ValueError('Component kind can not be ' + kind)
+        self.__state = STATE_STOPPED
+        self._kind = kind
+        self._boss = boss
+        self._original_start_time = None
+
+    def start(self):
+        """
+        Start the component for the first time or restart it. It runs
+        _start_internal to actually start the component.
+
+        If you try to start an already running component, it raises ValueError.
+        """
+        if self.__state == STATE_DEAD:
+            raise ValueError("Can't resurrect already dead component")
+        if self.running():
+            raise ValueError("Can't start already running component")
+        logger.info(BIND10_COMPONENT_START, self.name())
+        self.__state = STATE_RUNNING
+        self.__start_time = time.time()
+        if self._original_start_time is None:
+            self._original_start_time = self.__start_time
+        self._restart_time = None
+        try:
+            self._start_internal()
+        except Exception as e:
+            logger.error(BIND10_COMPONENT_START_EXCEPTION, self.name(), e)
+            self.failed(None)
+            raise
+
+    def stop(self):
+        """
+        Stop the component. It calls _stop_internal to do the actual
+        stopping.
+
+        If you try to stop a component that is not running, it raises
+        ValueError.
+        """
+        # This is not tested. It talks with the outer world, which is out
+        # of scope of unittests.
+        if not self.running():
+            raise ValueError("Can't stop a component which is not running")
+        logger.info(BIND10_COMPONENT_STOP, self.name())
+        self.__state = STATE_STOPPED
+        self._stop_internal()
+
+    def failed(self, exit_code):
+        """
+        Notify the component it crashed. This will be called from boss object.
+
+        If you try to call failed on a component that is not running,
+        a ValueError is raised.
+
+        If it is a core component or needed component and it was started only
+        recently, the component will become dead and will ask the boss to shut
+        down with error exit status. A dead component can't be started again.
+
+        Otherwise the component will try to restart.
+
+        The exit code is used for logging. It might be None.
+
+        It calls _failed_internal internally.
+
+        Returns True if the process was immediately restarted, returns
+                False if the process was not restarted, either because
+                it is considered a core or needed component, or because
+                the component is to be restarted later.
+        """
+        logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
+                     exit_code if exit_code is not None else "unknown")
+        if not self.running():
+            raise ValueError("Can't fail component that isn't running")
+        self.__state = STATE_STOPPED
+        self._failed_internal()
+        # If it is a core component or the needed component failed to start
+        # (including it stopped really soon)
+        if self._kind == 'core' or \
+            (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
+             self._original_start_time):
+            self.__state = STATE_DEAD
+            logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
+            self._boss.component_shutdown(1)
+            return False
+        # This means we want to restart
+        else:
+            # if the component was only running for a short time, don't
+            # restart right away, but set a time at which it should be restarted,
+            # and return that it wants to be restarted later
+            self.set_restart_time()
+            return self.restart()
+
+    def set_restart_time(self):
+        """Calculates and sets the time this component should be restarted.
+           Currently, it uses a very basic algorithm; start time +
+           RESTART_DELAY (10 seconds). This algorithm may be improved upon
+           in the future.
+        """
+        self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+    def get_restart_time(self):
+        """Returns the time at which this component should be restarted."""
+        return self._restart_at
+
+    def restart(self, now = None):
+        """Restarts the component if it has a restart_time and if the value
+           of the restart_time is smaller than 'now'.
+
+           If the parameter 'now' is given, its value will be used instead
+           of calling time.time().
+
+           Returns True if the component is restarted, False if not."""
+        if now is None:
+            now = time.time()
+        if self.get_restart_time() is not None and\
+           self.get_restart_time() < now:
+            self.start()
+            return True
+        else:
+            return False
+
+    def running(self):
+        """
+        Informs if the component is currently running. It assumes the failed
+        is called whenever the component really fails and there might be some
+        time in between actual failure and the call, so this might be
+        inaccurate (it corresponds to the thing the object thinks is true, not
+        to the real "external" state).
+
+        It is not expected for this method to be overridden.
+        """
+        return self.__state == STATE_RUNNING
+
+    def _start_internal(self):
+        """
+        This method does the actual starting of a process. You need to override
+        this method to do the actual starting.
+
+        The ability to override this method presents some flexibility. It
+        allows processes started in a strange way, as well as components that
+        have no processes at all or components with multiple processes (in case
+        of multiple processes, care should be taken to make their
+        started/stopped state in sync and all the processes that can fail
+        should be registered).
+
+        You should register all the processes created by calling
+        self._boss.register_process.
+        """
+        pass
+
+    def _stop_internal(self):
+        """
+        This is the method that does the actual stopping of a component.
+        You need to provide it in a concrete implementation.
+
+        Also, note that it is a bad idea to raise exceptions from here.
+        Under such circumstance, the component will be considered stopped,
+        and the exception propagated, but we can't be sure it really is
+        dead.
+        """
+        pass
+
+    def _failed_internal(self):
+        """
+        This method is called from failed. You can replace it if you need
+        some specific behaviour when the component crashes. The default
+        implementation is empty.
+
+        Do not raise exceptions from here, please. Otherwise the proper
+        shutdown would not happen.
+        """
+        pass
+
+    def name(self):
+        """
+        Provides human readable name of the component, for logging and similar
+        purposes.
+
+        You need to provide this method in a concrete implementation.
+        """
+        pass
+
+    def pid(self):
+        """
+        Provides a PID of a process, if the component is real running process.
+        This may return None in cases when there's no process involved with the
+        component or in case the component is not started yet.
+
+        However, it is expected the component preserves the pid after it was
+        stopped, to ensure we can log it when we ask it to be killed (in case
+        the process refused to stop willingly).
+
+        You need to provide this method in a concrete implementation.
+        """
+        pass
+
+    def kill(self, forceful=False):
+        """
+        Kills the component.
+
+        If forceful is true, it should do it in a more direct and aggressive way
+        (for example by using SIGKILL or some equivalent). If it is false, more
+        peaceful way should be used (SIGTERM or equivalent).
+
+        You need to provide this method in a concrete implementation.
+        """
+        pass
+
+class Component(BaseComponent):
+    """
+    The most common implementation of a component. It can be used either
+    directly, and it will just start the process without anything special,
+    or slightly customised by passing a start_func hook to the __init__
+    to change the way it starts.
+
+    If such customisation isn't enough, you should inherit BaseComponent
+    directly. It is not recommended to override methods of this class
+    on one-by-one basis.
+    """
+    def __init__(self, process, boss, kind, address=None, params=None,
+                 start_func=None):
+        """
+        Creates the component in not running mode.
+
+        The parameters are:
+        - `process` is the name of the process to start.
+        - `boss` the boss object to plug into. The component needs to plug
+          into it to know when it failed, etc.
+        - `kind` is the kind of component. Refer to the documentation of
+          BaseComponent for details.
+        - `address` is the address on message bus. It is used to ask it to
+            shut down at the end. If you specialize the class for a component
+            that is shut down differently, it might be None.
+        - `params` is a list of parameters to pass to the process when it
+           starts. It is currently unused and this support is left out for
+           now.
+        - `start_func` is a function called when it is started. It is supposed
+           to start up the process and return a ProcInfo object describing it.
+           There's a sensible default if not provided, which just launches
+           the program without any special care.
+        """
+        BaseComponent.__init__(self, boss, kind)
+        self._process = process
+        self._start_func = start_func
+        self._address = address
+        self._params = params
+        self._procinfo = None
+
+    def _start_internal(self):
+        """
+        You can change the "core" of this function by setting self._start_func
+        to a function without parameters. Such function should start the
+        process and return the procinfo object describing the running process.
+
+        If you don't provide the _start_func, the usual startup by calling
+        boss.start_simple is performed.
+        """
+        # This one is not tested. For one, it starts a real process
+        # which is out of scope of unit tests, for another, it just
+        # delegates the starting to other function in boss (if a derived
+        # class does not provide an override function), which is tested
+        # by use.
+        if self._start_func is not None:
+            procinfo = self._start_func()
+        else:
+            # TODO Handle params, etc
+            procinfo = self._boss.start_simple(self._process)
+        self._procinfo = procinfo
+        self._boss.register_process(self.pid(), self)
+
+    def _stop_internal(self):
+        self._boss.stop_process(self._process, self._address)
+        # TODO Some way to wait for the process that doesn't want to
+        # terminate and kill it would prove nice (or add it to boss somewhere?)
+
+    def name(self):
+        """
+        Returns the name, derived from the process name.
+        """
+        return self._process
+
+    def pid(self):
+        return self._procinfo.pid if self._procinfo is not None else None
+
+    def kill(self, forcefull=False):
+        if self._procinfo is not None:
+            if forcefull:
+                self._procinfo.process.kill()
+            else:
+                self._procinfo.process.terminate()
+
+class Configurator:
+    """
+    This thing keeps track of configuration changes and starts and stops
+    components as it goes. It also handles the initial startup and final
+    shutdown.
+
+    Note that this will allow you to stop (by invoking reconfigure) a core
+    component. There should be some kind of layer protecting users from ever
+    doing so (users must not stop the config manager, message queue and stuff
+    like that or the system won't start again). However, if a user specifies
+    b10-auth as core, it is safe to stop that one.
+
+    The parameters are:
+    * `boss`: The boss we are managing for.
+    * `specials`: Dict of specially started components. Each item is a class
+      representing the component.
+
+    The configuration passed to it (by startup() and reconfigure()) is a
+    dictionary, each item represents one component that should be running.
+    The key is an unique identifier used to reference the component. The
+    value is a dictionary describing the component. All items in the
+    description is optional unless told otherwise and they are as follows:
+    * `special` - Some components are started in a special way. If it is
+      present, it specifies which class from the specials parameter should
+      be used to create the component. In that case, some of the following
+      items might be irrelevant, depending on the special component chosen.
+      If it is not there, the basic Component class is used.
+    * `process` - Name of the executable to start. If it is not present,
+      it defaults to the identifier of the component.
+    * `kind` - The kind of component, either of 'core', 'needed' and
+      'dispensable'. This specifies what happens if the component fails.
+      This one is required.
+    * `address` - The address of the component on message bus. It is used
+      to shut down the component. All special components currently either
+      know their own address or don't need one and ignore it. The common
+      components should provide this.
+    * `params` - The command line parameters of the executable. Defaults
+      to no parameters. It is currently unused.
+    * `priority` - When starting the component, the components with higher
+      priority are started before the ones with lower priority. If it is
+      not present, it defaults to 0.
+    """
+    def __init__(self, boss, specials = {}):
+        """
+        Initializes the configurator, but nothing is started yet.
+
+        The boss parameter is the boss object used to start and stop processes.
+        """
+        self.__boss = boss
+        # These could be __private, but as we access them from within unittest,
+        # it's more comfortable to have them just _protected.
+
+        # They are tuples (configuration, component)
+        self._components = {}
+        self._running = False
+        self.__specials = specials
+
+    def __reconfigure_internal(self, old, new):
+        """
+        Does a switch from one configuration to another.
+        """
+        self._run_plan(self._build_plan(old, new))
+
+    def startup(self, configuration):
+        """
+        Starts the first set of processes. This configuration is expected
+        to be hardcoded from the boss itself to start the configuration
+        manager and other similar things.
+        """
+        if self._running:
+            raise ValueError("Trying to start the component configurator " +
+                             "twice")
+        logger.info(BIND10_CONFIGURATOR_START)
+        self.__reconfigure_internal(self._components, configuration)
+        self._running = True
+
+    def shutdown(self):
+        """
+        Shuts everything down.
+
+        It is not expected that anyone would want to shutdown and then start
+        the configurator again, so we don't explicitly make sure that would
+        work. However, we are not aware of anything that would make it not
+        work either.
+        """
+        if not self._running:
+            raise ValueError("Trying to shutdown the component " +
+                             "configurator while it's not yet running")
+        logger.info(BIND10_CONFIGURATOR_STOP)
+        self._running = False
+        self.__reconfigure_internal(self._components, {})
+
+    def reconfigure(self, configuration):
+        """
+        Changes configuration from the current one to the provided. It
+        starts and stops all the components as needed (eg. if there's
+        a component that was not in the original configuration, it is
+        started, any component that was in the old and is not in the
+        new one is stopped).
+        """
+        if not self._running:
+            raise ValueError("Trying to reconfigure the component " +
+                             "configurator while it's not yet running")
+        logger.info(BIND10_CONFIGURATOR_RECONFIGURE)
+        self.__reconfigure_internal(self._components, configuration)
+
+    def _build_plan(self, old, new):
+        """
+        Builds a plan how to transfer from the old configuration to the new
+        one. It'll be sorted by priority and it will contain the components
+        (already created, but not started). Each command in the plan is a dict,
+        so it can be extended any time in future to include whatever
+        parameters each operation might need.
+
+        Any configuration problems are expected to be handled here, so the
+        plan is not yet run.
+        """
+        logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_BUILD, old, new)
+        plan = []
+        # Handle removals of old components
+        for cname in old.keys():
+            if cname not in new:
+                component = self._components[cname][1]
+                if component.running():
+                    plan.append({
+                        'command': STOP_CMD,
+                        'component': component,
+                        'name': cname
+                    })
+        # Handle transitions of configuration of what is here
+        for cname in new.keys():
+            if cname in old:
+                for option in ['special', 'process', 'kind', 'address',
+                               'params']:
+                    if new[cname].get(option) != old[cname][0].get(option):
+                        raise NotImplementedError('Changing configuration of' +
+                                                  ' a running component is ' +
+                                                  'not yet supported. Remove' +
+                                                  ' and re-add ' + cname +
+                                                  ' to get the same effect')
+        # Handle introduction of new components
+        plan_add = []
+        for cname in new.keys():
+            if cname not in old:
+                component_config = new[cname]
+                creator = Component
+                if 'special' in component_config:
+                    # TODO: Better error handling
+                    creator = self.__specials[component_config['special']]
+                component = creator(component_config.get('process', cname),
+                                    self.__boss, component_config['kind'],
+                                    component_config.get('address'),
+                                    component_config.get('params'))
+                priority = component_config.get('priority', 0)
+                # We store tuples, priority first, so we can easily sort
+                plan_add.append((priority, {
+                    'component': component,
+                    'command': START_CMD,
+                    'name': cname,
+                    'config': component_config
+                }))
+        # Push the starts there sorted by priority
+        plan.extend([command for (_, command) in sorted(plan_add,
+                                                        reverse=True,
+                                                        key=lambda command:
+                                                            command[0])])
+        return plan
+
+    def running(self):
+        """
+        Returns if the configurator is running (eg. was started by startup and
+        not yet stopped by shutdown).
+        """
+        return self._running
+
+    def _run_plan(self, plan):
+        """
+        Run a plan, created beforehand by _build_plan.
+
+        With the start and stop commands, it also adds and removes components
+        in _components.
+
+        Currently implemented commands are:
+        * start
+        * stop
+
+        The plan is a list of tasks, each task is a dictionary. It must contain
+        at least 'component' (a component object to work with) and 'command'
+        (the command to do). Currently, both existing commands need 'name' of
+        the component as well (the identifier from configuration). The 'start'
+        one needs the 'config' to be there, which is the configuration description
+        of the component.
+        """
+        done = 0
+        try:
+            logger.debug(DBG_TRACE_DATA, BIND10_CONFIGURATOR_RUN, len(plan))
+            for task in plan:
+                component = task['component']
+                command = task['command']
+                logger.debug(DBG_TRACE_DETAILED, BIND10_CONFIGURATOR_TASK,
+                             command, component.name())
+                if command == START_CMD:
+                    component.start()
+                    self._components[task['name']] = (task['config'],
+                                                      component)
+                elif command == STOP_CMD:
+                    if component.running():
+                        component.stop()
+                    del self._components[task['name']]
+                else:
+                    # Can Not Happen (as the plans are generated by ourselves).
+                    # Therefore not tested.
+                    raise NotImplementedError("Command unknown: " + command)
+                done += 1
+        except:
+            logger.error(BIND10_CONFIGURATOR_PLAN_INTERRUPTED, done, len(plan))
+            raise
diff --git a/src/lib/python/isc/bind10/sockcreator.py b/src/lib/python/isc/bind10/sockcreator.py
new file mode 100644
index 0000000..c681d07
--- /dev/null
+++ b/src/lib/python/isc/bind10/sockcreator.py
@@ -0,0 +1,239 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import socket
+import struct
+import os
+import copy
+import subprocess
+import copy
+from isc.log_messages.bind10_messages import *
+from libutil_io_python import recv_fd
+
+logger = isc.log.Logger("boss")
+
+"""
+Module that communicates with the privileged socket creator (b10-sockcreator).
+"""
+
+class CreatorError(Exception):
+    """
+    Exception for socket creator related errors.
+
+    It has two members: fatal and errno and they are just holding the values
+    passed to the __init__ function.
+    """
+
+    def __init__(self, message, fatal, errno=None):
+        """
+        Creates the exception. The message argument is the usual string.
+        The fatal one tells if the error is fatal (eg. the creator crashed)
+        and errno is the errno value returned from socket creator, if
+        applicable.
+        """
+        Exception.__init__(self, message)
+        self.fatal = fatal
+        self.errno = errno
+
+class Parser:
+    """
+    This class knows the sockcreator language. It creates commands, sends them
+    and receives the answers and parses them.
+
+    It does not start it, the communication channel must be provided.
+
+    In theory, anything here can throw a fatal CreatorError exception, but it
+    happens only in case something like the creator process crashes. Any other
+    occasions are mentioned explicitly.
+    """
+
+    def __init__(self, creator_socket):
+        """
+        Creates the parser. The creator_socket is socket to the socket creator
+        process that will be used for communication. However, the object must
+        have a read_fd() method to read the file descriptor. This slightly
+        unusual trick with modifying an object is used to ease testing.
+
+        You can use WrappedSocket in production code to add the method to any
+        ordinary socket.
+        """
+        self.__socket = creator_socket
+        logger.info(BIND10_SOCKCREATOR_INIT)
+
+    def terminate(self):
+        """
+        Asks the creator process to terminate and waits for it to close the
+        socket. Does not return anything. Raises a CreatorError if there is
+        still data on the socket, if there is an error closing the socket,
+        or if the socket had already been closed.
+        """
+        if self.__socket is None:
+            raise CreatorError('Terminated already', True)
+        logger.info(BIND10_SOCKCREATOR_TERMINATE)
+        try:
+            self.__socket.sendall(b'T')
+            # Wait for an EOF - it will return empty data
+            eof = self.__socket.recv(1)
+            if len(eof) != 0:
+                raise CreatorError('Protocol error - data after terminated',
+                                   True)
+            self.__socket = None
+        except socket.error as se:
+            self.__socket = None
+            raise CreatorError(str(se), True)
+
+    def get_socket(self, address, port, socktype):
+        """
+        Asks the socket creator process to create a socket. Pass an address
+        (the isc.net.IPaddr object), port number and socket type (either
+        string "UDP", "TCP" or constant socket.SOCK_DGRAM or
+        socket.SOCK_STREAM.
+
+        Blocks until it is provided by the socket creator process (which
+        should be fast, as it is on localhost) and returns the file descriptor
+        number. It raises a CreatorError exception if the creation fails.
+        """
+        if self.__socket is None:
+            raise CreatorError('Socket requested on terminated creator', True)
+        # First, assemble the request from parts
+        logger.info(BIND10_SOCKET_GET, address, port, socktype)
+        data = b'S'
+        if socktype == 'UDP' or socktype == socket.SOCK_DGRAM:
+            data += b'U'
+        elif socktype == 'TCP' or socktype == socket.SOCK_STREAM:
+            data += b'T'
+        else:
+            raise ValueError('Unknown socket type: ' + str(socktype))
+        if address.family == socket.AF_INET:
+            data += b'4'
+        elif address.family == socket.AF_INET6:
+            data += b'6'
+        else:
+            raise ValueError('Unknown address family in address')
+        data += struct.pack('!H', port)
+        data += address.addr
+        try:
+            # Send the request
+            self.__socket.sendall(data)
+            answer = self.__socket.recv(1)
+            if answer == b'S':
+                # Success!
+                result = self.__socket.read_fd()
+                logger.info(BIND10_SOCKET_CREATED, result)
+                return result
+            elif answer == b'E':
+                # There was an error, read the error as well
+                error = self.__socket.recv(1)
+                errno = struct.unpack('i',
+                                      self.__read_all(len(struct.pack('i',
+                                                                      0))))
+                if error == b'S':
+                    cause = 'socket'
+                elif error == b'B':
+                    cause = 'bind'
+                else:
+                    self.__socket = None
+                    logger.fatal(BIND10_SOCKCREATOR_BAD_CAUSE, error)
+                    raise CreatorError('Unknown error cause' + str(answer), True)
+                logger.error(BIND10_SOCKET_ERROR, cause, errno[0],
+                             os.strerror(errno[0]))
+                raise CreatorError('Error creating socket on ' + cause, False,
+                                   errno[0])
+            else:
+                self.__socket = None
+                logger.fatal(BIND10_SOCKCREATOR_BAD_RESPONSE, answer)
+                raise CreatorError('Unknown response ' + str(answer), True)
+        except socket.error as se:
+            self.__socket = None
+            logger.fatal(BIND10_SOCKCREATOR_TRANSPORT_ERROR, str(se))
+            raise CreatorError(str(se), True)
+
+    def __read_all(self, length):
+        """
+        Keeps reading until length data is read or EOF or error happens.
+
+        EOF is considered error as well and throws a CreatorError.
+        """
+        result = b''
+        while len(result) < length:
+            data = self.__socket.recv(length - len(result))
+            if len(data) == 0:
+                self.__socket = None
+                logger.fatal(BIND10_SOCKCREATOR_EOF)
+                raise CreatorError('Unexpected EOF', True)
+            result += data
+        return result
+
+class WrappedSocket:
+    """
+    This class wraps a socket and adds a read_fd method, so it can be used
+    for the Parser class conveniently. It simply copies all its guts into
+    itself and implements the method.
+    """
+    def __init__(self, socket):
+        # Copy whatever can be copied from the socket
+        for name in dir(socket):
+            if name not in ['__class__', '__weakref__']:
+                setattr(self, name, getattr(socket, name))
+        # Keep the socket, so we can prevent it from being garbage-collected
+        # and closed before we are removed ourselves
+        self.__orig_socket = socket
+
+    def read_fd(self):
+        """
+        Read the file descriptor from the socket.
+        """
+        return recv_fd(self.fileno())
+
+# FIXME: Any idea how to test this? Starting an external process doesn't sound
+# OK
+class Creator(Parser):
+    """
+    This starts the socket creator and allows asking for the sockets.
+
+    Note: __process shouldn't be reset once created.  See the note
+    of the SockCreator class for details.
+    """
+    def __init__(self, path):
+        (local, remote) = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
+        # Popen does not like, for some reason, having the same socket for
+        # stdin as well as stdout, so we dup it before passing it there.
+        remote2 = socket.fromfd(remote.fileno(), socket.AF_UNIX,
+                                socket.SOCK_STREAM)
+        env = copy.deepcopy(os.environ)
+        env['PATH'] = path
+        self.__process = subprocess.Popen(['b10-sockcreator'], env=env,
+                                          stdin=remote.fileno(),
+                                          stdout=remote2.fileno(),
+                                          preexec_fn=self.__preexec_work)
+        remote.close()
+        remote2.close()
+        Parser.__init__(self, WrappedSocket(local))
+
+    def __preexec_work(self):
+        """Function used before running a program that needs to run as a
+        different user."""
+        # Put us into a separate process group so we don't get
+        # SIGINT signals on Ctrl-C (the boss will shut everything down by
+        # other means).
+        os.setpgrp()
+
+    def pid(self):
+        return self.__process.pid
+
+    def kill(self):
+        logger.warn(BIND10_SOCKCREATOR_KILL)
+        if self.__process is not None:
+            self.__process.kill()
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
new file mode 100644
index 0000000..dad10bb
--- /dev/null
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -0,0 +1,152 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from isc.bind10.component import Component, BaseComponent
+import isc.bind10.sockcreator
+from bind10_config import LIBEXECDIR
+import os
+import posix
+import isc.log
+from isc.log_messages.bind10_messages import *
+
+logger = isc.log.Logger("boss")
+
+class SockCreator(BaseComponent):
+    """
+    The socket creator component. Will start and stop the socket creator
+    accordingly.
+
+    Note: _creator shouldn't be reset explicitly once created.  The
+    underlying Popen object would then wait() the child process internally,
+    which breaks the assumption of the boss, who is expecting to see
+    the process die in waitpid().
+    """
+    def __init__(self, process, boss, kind, address=None, params=None):
+        BaseComponent.__init__(self, boss, kind)
+        self.__creator = None
+
+    def _start_internal(self):
+        self._boss.curproc = 'b10-sockcreator'
+        self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
+                                                        os.environ['PATH'])
+        self._boss.register_process(self.pid(), self)
+        self._boss.log_started(self.pid())
+
+    def _stop_internal(self):
+        self.__creator.terminate()
+
+    def name(self):
+        return "Socket creator"
+
+    def pid(self):
+        """
+        Pid of the socket creator. It is provided differently from a usual
+        component.
+        """
+        return self.__creator.pid() if self.__creator else None
+
+    def kill(self, forceful=False):
+        # We don't really care about forceful here
+        if self.__creator:
+            self.__creator.kill()
+
+class Msgq(Component):
+    """
+    The message queue. Starting is delegated to the boss; stopping is not
+    supported, so we let the boss kill it by signal.
+    """
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, None, None,
+                           boss.start_msgq)
+
+    def _stop_internal(self):
+        """
+        We can't really stop the message queue, as many processes may need
+        it for their shutdown and it doesn't have a shutdown command anyway.
+        But as it is stateless, it's OK to kill it.
+
+        So we disable this method (as the only time it could be called is
+        during shutdown) and wait for the boss to kill it in the next shutdown
+        step.
+
+        This actually breaks the recommendation at Component we shouldn't
+        override its methods one by one. This is a special case, because
+        we don't provide a different implementation, we completely disable
+        the method by providing an empty one. This can't hurt the internals.
+        """
+        pass
+
+class CfgMgr(Component):
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, 'ConfigManager',
+                           None, boss.start_cfgmgr)
+
+class Auth(Component):
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, 'Auth', None,
+                           boss.start_auth)
+
+class Resolver(Component):
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, 'Resolver', None,
+                           boss.start_resolver)
+
+class CmdCtl(Component):
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, 'Cmdctl', None,
+                           boss.start_cmdctl)
+
+class SetUID(BaseComponent):
+    """
+    This is a pseudo-component which drops root privileges when started
+    and sets the uid stored in boss.
+
+    This component does nothing when stopped.
+    """
+    def __init__(self, process, boss, kind, address=None, params=None):
+        BaseComponent.__init__(self, boss, kind)
+        self.uid = boss.uid
+
+    def _start_internal(self):
+        if self.uid is not None:
+            logger.info(BIND10_SETUID, self.uid)
+            posix.setuid(self.uid)
+
+    def _stop_internal(self): pass
+    def kill(self, forceful=False): pass
+
+    def name(self):
+        return "Set UID"
+
+    def pid(self):
+        return None
+
+def get_specials():
+    """
+    List of specially started components. Each one should be the class that can
+    be created for that component.
+    """
+    return {
+        'sockcreator': SockCreator,
+        'msgq': Msgq,
+        'cfgmgr': CfgMgr,
+        # TODO: Should these be replaced by configuration in config manager only?
+        # They should not have any parameters anyway
+        'auth': Auth,
+        'resolver': Resolver,
+        'cmdctl': CmdCtl,
+        # TODO: Remove when not needed, workaround before sockcreator works
+        'setuid': SetUID
+    }
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
new file mode 100644
index 0000000..df625b2
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -0,0 +1,29 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+#PYTESTS = args_test.py bind10_test.py
+# NOTE: this has a generated test found in the builddir
+PYTESTS = sockcreator_test.py component_test.py
+
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+	touch $(abs_top_srcdir)/.coverage 
+	rm -f .coverage
+	${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+	for pytest in $(PYTESTS) ; do \
+	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_srcdir)/src/bin:$(abs_top_builddir)/src/bin/bind10:$(abs_top_builddir)/src/lib/util/io/.libs \
+	BIND10_MSGQ_SOCKET_FILE=$(abs_top_builddir)/msgq_socket \
+		$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+	done
+
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
new file mode 100644
index 0000000..3b49b18
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -0,0 +1,1032 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Tests for the isc.bind10.component module and the
+isc.bind10.special_component module.
+"""
+
+import unittest
+import isc.log
+import time
+import copy
+from isc.bind10.component import Component, Configurator, BaseComponent
+import isc.bind10.special_component
+
+class TestError(Exception):
+    """
+    Just a private exception not known to anybody we use for our tests.
+    """
+    pass
+
+class BossUtils:
+    """
+    A class that brings some utilities for pretending we're Boss.
+    This is expected to be inherited by the testcases themselves.
+    """
+    def setUp(self):
+        """
+        Part of setup. Should be called by descendant's setUp.
+        """
+        self._shutdown = False
+        self._exitcode = None
+        # Back up the time function, we may want to replace it with something
+        self.__orig_time = isc.bind10.component.time.time
+
+    def tearDown(self):
+        """
+        Clean up after tests. If the descendant implements a tearDown, it
+        should call this method internally.
+        """
+        # Return the original time function
+        isc.bind10.component.time.time = self.__orig_time
+
+    def component_shutdown(self, exitcode=0):
+        """
+        Mock function to shut down. We just note we were asked to do so.
+        """
+        self._shutdown = True
+        self._exitcode = exitcode
+
+    def _timeskip(self):
+        """
+        Skip in time to future some 30s. Implemented by replacing the
+        time.time function in the tested module with function that returns
+        current time increased by 30.
+        """
+        tm = time.time()
+        isc.bind10.component.time.time = lambda: tm + 30
+
+    # Few functions that pretend to start something. Part of pretending of
+    # being boss.
+    def start_msgq(self):
+        pass
+
+    def start_cfgmgr(self):
+        pass
+
+    def start_auth(self):
+        pass
+
+    def start_resolver(self):
+        pass
+
+    def start_cmdctl(self):
+        pass
+
+class ComponentTests(BossUtils, unittest.TestCase):
+    """
+    Tests for the bind10.component.Component class
+    """
+    def setUp(self):
+        """
+        Pretend a newly started system.
+        """
+        BossUtils.setUp(self)
+        self._shutdown = False
+        self._exitcode = None
+        self.__start_called = False
+        self.__stop_called = False
+        self.__failed_called = False
+        self.__registered_processes = {}
+        self.__stop_process_params = None
+        self.__start_simple_params = None
+        # Pretending to be boss
+        self.uid = None
+        self.__uid_set = None
+
+    def __start(self):
+        """
+        Mock function, installed into the component into _start_internal.
+        This only notes the component was "started".
+        """
+        self.__start_called = True
+
+    def __stop(self):
+        """
+        Mock function, installed into the component into _stop_internal.
+        This only notes the component was "stopped".
+        """
+        self.__stop_called = True
+
+    def __fail(self):
+        """
+        Mock function, installed into the component into _failed_internal.
+        This only notes the component called the method.
+        """
+        self.__failed_called = True
+
+    def __fail_to_start(self):
+        """
+        Mock function. It can be installed into the component's _start_internal
+        to simulate a component that fails to start by raising an exception.
+        """
+        orig_started = self.__start_called
+        self.__start_called = True
+        if not orig_started:
+            # This one is from restart. Avoid infinite recursion for now.
+            # FIXME: We should use the restart scheduler to avoid it, not this.
+            raise TestError("Test error")
+
+    def __create_component(self, kind):
+        """
+        Convenience function that creates a component of given kind
+        and installs the mock functions into it so we can hook up into
+        its behaviour.
+
+        The process used is some nonsense, as this isn't used in this
+        kind of tests and we pretend to be the boss.
+        """
+        component = Component('No process', self, kind, 'homeless', [])
+        component._start_internal = self.__start
+        component._stop_internal = self.__stop
+        component._failed_internal = self.__fail
+        return component
+
+    def test_name(self):
+        """
+        Test the name provides whatever we passed to the constructor as process.
+        """
+        component = self.__create_component('core')
+        self.assertEqual('No process', component.name())
+
+    def test_guts(self):
+        """
+        Test the correct data are stored inside the component.
+        """
+        component = self.__create_component('core')
+        self.assertEqual(self, component._boss)
+        self.assertEqual("No process", component._process)
+        self.assertEqual(None, component._start_func)
+        self.assertEqual("homeless", component._address)
+        self.assertEqual([], component._params)
+
+    def __check_startup(self, component):
+        """
+        Check that nothing was called yet. A newly created component should
+        not get started right away, so this should pass after the creation.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertFalse(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertFalse(self.__failed_called)
+        self.assertFalse(component.running())
+        # We can't stop or fail the component yet
+        self.assertRaises(ValueError, component.stop)
+        self.assertRaises(ValueError, component.failed, 1)
+
+    def __check_started(self, component):
+        """
+        Check the component was started, but not stopped anyhow yet.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertFalse(self.__failed_called)
+        self.assertTrue(component.running())
+
+    def __check_dead(self, component):
+        """
+        Check the component is completely dead, and the server too.
+        """
+        self.assertTrue(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertTrue(self.__failed_called)
+        self.assertEqual(1, self._exitcode)
+        self.assertFalse(component.running())
+        # Surely it can't be stopped when already dead
+        self.assertRaises(ValueError, component.stop)
+        # Nor started
+        self.assertRaises(ValueError, component.start)
+        # Nor it can fail again
+        self.assertRaises(ValueError, component.failed, 1)
+
+    def __check_restarted(self, component):
+        """
+        Check the component restarted successfully.
+
+        Reset the self.__start_called to False before calling the function when
+        the component should fail.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertTrue(self.__failed_called)
+        self.assertTrue(component.running())
+        # Check it can't be started again
+        self.assertRaises(ValueError, component.start)
+
+    def __check_not_restarted(self, component):
+        """
+        Check the component has not (yet) restarted successfully.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertTrue(self.__failed_called)
+        self.assertFalse(component.running())
+
+    def __do_start_stop(self, kind):
+        """
+        This is a body of a test. It creates a component of given kind,
+        then starts it and stops it. It checks correct functions are called
+        and the component's status is correct.
+
+        It also checks the component can't be started/stopped twice.
+        """
+        # Create it and check it did not do any funny stuff yet
+        component = self.__create_component(kind)
+        self.__check_startup(component)
+        # Start it and check it called the correct starting functions
+        component.start()
+        self.__check_started(component)
+        # Check it can't be started twice
+        self.assertRaises(ValueError, component.start)
+        # Stop it again and check
+        component.stop()
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertTrue(self.__stop_called)
+        self.assertFalse(self.__failed_called)
+        self.assertFalse(component.running())
+        # Check it can't be stopped twice
+        self.assertRaises(ValueError, component.stop)
+        # Or failed
+        self.assertRaises(ValueError, component.failed, 1)
+        # But it can be started again if it is stopped
+        # (no more checking here, just it doesn't crash)
+        component.start()
+
+    def test_start_stop_core(self):
+        """
+        A start-stop test for core component. See do_start_stop.
+        """
+        self.__do_start_stop('core')
+
+    def test_start_stop_needed(self):
+        """
+        A start-stop test for needed component. See do_start_stop.
+        """
+        self.__do_start_stop('needed')
+
+    def test_start_stop_dispensable(self):
+        """
+        A start-stop test for dispensable component. See do_start_stop.
+        """
+        self.__do_start_stop('dispensable')
+
+    def test_start_fail_core(self):
+        """
+        Start and then fail a core component. It should stop the whole server.
+        """
+        # Just ordinary startup
+        component = self.__create_component('core')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Pretend the component died
+        restarted = component.failed(1)
+        # Since it is a core component, it should not be restarted
+        self.assertFalse(restarted)
+        # It should bring down the whole server
+        self.__check_dead(component)
+
+    def test_start_fail_core_later(self):
+        """
+        Start and then fail a core component, but let it run for a longer time.
+        It should still stop the whole server.
+        """
+        # Just ordinary startup
+        component = self.__create_component('core')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        self._timeskip()
+        # Pretend the component died some time later
+        restarted = component.failed(1)
+        # Should not be restarted
+        self.assertFalse(restarted)
+        # Check the component is still dead
+        self.__check_dead(component)
+
+    def test_start_fail_needed(self):
+        """
+        Start and then fail a needed component. As this happens really soon after
+        being started, it is considered failure to start and should bring down the
+        whole server.
+        """
+        # Just ordinary startup
+        component = self.__create_component('needed')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail right away.
+        restarted = component.failed(1)
+        # Should not have restarted
+        self.assertFalse(restarted)
+        self.__check_dead(component)
+
+    def test_start_fail_needed_later(self):
+        """
+        Start and then fail a needed component. But the failure is later on, so
+        we just restart it and will be happy.
+        """
+        # Just ordinary startup
+        component = self.__create_component('needed')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail later on
+        self.__start_called = False
+        self._timeskip()
+        restarted = component.failed(1)
+        # Should have restarted
+        self.assertTrue(restarted)
+        self.__check_restarted(component)
+
+    def test_start_fail_dispensable(self):
+        """
+        Start and then fail a dispensable component. Should not get restarted.
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail right away
+        restarted = component.failed(1)
+        # Should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+
+    def test_start_fail_dispensable_later(self):
+        """
+        Start and then later on fail a dispensable component. Should just get
+        restarted.
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail later on
+        self._timeskip()
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+    def test_start_fail_dispensable_restart_later(self):
+        """
+        Start and then fail a dispensable component, wait a bit and try to
+        restart. Should get restarted after the wait.
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail immediately
+        restarted = component.failed(1)
+        # should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # try to restart again
+        restarted = component.restart()
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+    def test_fail_core(self):
+        """
+        Failure to start a core component. Should bring the system down
+        and the exception should get through.
+        """
+        component = self.__create_component('core')
+        self.__check_startup(component)
+        component._start_internal = self.__fail_to_start
+        self.assertRaises(TestError, component.start)
+        self.__check_dead(component)
+
+    def test_fail_needed(self):
+        """
+        Failure to start a needed component. Should bring the system down
+        and the exception should get through.
+        """
+        component = self.__create_component('needed')
+        self.__check_startup(component)
+        component._start_internal = self.__fail_to_start
+        self.assertRaises(TestError, component.start)
+        self.__check_dead(component)
+
+    def test_fail_dispensable(self):
+        """
+        Failure to start a dispensable component. The exception should get
+        through, but it should be restarted after a time skip.
+        """
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component._start_internal = self.__fail_to_start
+        self.assertRaises(TestError, component.start)
+        # tell it to see if it must restart
+        restarted = component.restart()
+        # should not have restarted yet
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # tell it to see if it must restart and do so, with our vision of time
+        restarted = component.restart()
+        # should have restarted now
+        self.assertTrue(restarted)
+        self.__check_restarted(component)
+
+    def test_component_start_time(self):
+        """
+        Check that original start time is set initially, and remains the same
+        after a restart, while the internal __start_time does change
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        self.assertIsNone(component._original_start_time)
+        component.start()
+        self.__check_started(component)
+
+        self.assertIsNotNone(component._original_start_time)
+        self.assertIsNotNone(component._BaseComponent__start_time)
+        original_start_time = component._original_start_time
+        start_time = component._BaseComponent__start_time
+        # Not restarted yet, so they should be the same
+        self.assertEqual(original_start_time, start_time)
+
+        self._timeskip()
+        # Make it fail
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+        # original start time should not have changed
+        self.assertEqual(original_start_time, component._original_start_time)
+        # but actual start time should
+        self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
+    def test_bad_kind(self):
+        """
+        Test the component rejects nonsensical kinds. This includes bad
+        capitalization.
+        """
+        for kind in ['Core', 'CORE', 'nonsense', 'need ed', 'required']:
+            self.assertRaises(ValueError, Component, 'No process', self, kind)
+
+    def test_pid_not_running(self):
+        """
+        Test that a componet that is not yet started doesn't have a PID.
+        But it won't fail if asked for and return None.
+        """
+        for component_type in [Component,
+                               isc.bind10.special_component.SockCreator,
+                               isc.bind10.special_component.Msgq,
+                               isc.bind10.special_component.CfgMgr,
+                               isc.bind10.special_component.Auth,
+                               isc.bind10.special_component.Resolver,
+                               isc.bind10.special_component.CmdCtl,
+                               isc.bind10.special_component.SetUID]:
+            component = component_type('none', self, 'needed')
+            self.assertIsNone(component.pid())
+
+    def test_kill_unstarted(self):
+        """
+        Try to kill the component if it's not started. Should not fail.
+
+        We do not try to kill a running component, as we should not start
+        it during unit tests.
+        """
+        component = Component('component', self, 'needed')
+        component.kill()
+        component.kill(True)
+
    def register_process(self, pid, process):
        """
        Part of pretending to be a boss

        Records the (pid, process) pair so tests can later check that a
        started component registered itself.
        """
        self.__registered_processes[pid] = process
+
+    def test_component_attributes(self):
+        """
+        Test the default attributes of Component (not BaseComponent) and
+        some of the methods we might be allowed to call.
+        """
+        class TestProcInfo:
+            def __init__(self):
+                self.pid = 42
+        component = Component('component', self, 'needed', 'Address',
+                              ['hello'], TestProcInfo)
+        self.assertEqual('component', component._process)
+        self.assertEqual('component', component.name())
+        self.assertIsNone(component._procinfo)
+        self.assertIsNone(component.pid())
+        self.assertEqual(['hello'], component._params)
+        self.assertEqual('Address', component._address)
+        self.assertFalse(component.running())
+        self.assertEqual({}, self.__registered_processes)
+        component.start()
+        self.assertTrue(component.running())
+        # Some versions of unittest miss assertIsInstance
+        self.assertTrue(isinstance(component._procinfo, TestProcInfo))
+        self.assertEqual(42, component.pid())
+        self.assertEqual(component, self.__registered_processes.get(42))
+
    def stop_process(self, process, address):
        """
        Part of pretending to be boss.

        Records the arguments so the test can verify what a component asked
        to be stopped.
        """
        self.__stop_process_params = (process, address)
+
    def start_simple(self, process):
        """
        Part of pretending to be boss.

        Records the process name so the test can verify what a component
        asked to be started.
        """
        self.__start_simple_params = process
+
+    def test_component_start_stop_internal(self):
+        """
+        Test the behavior of _stop_internal and _start_internal.
+        """
+        component = Component('component', self, 'needed', 'Address')
+        component.start()
+        self.assertTrue(component.running())
+        self.assertEqual('component', self.__start_simple_params)
+        component.stop()
+        self.assertFalse(component.running())
+        self.assertEqual(('component', 'Address'), self.__stop_process_params)
+
+    def test_component_kill(self):
+        """
+        Check the kill is propagated. The case when component wasn't started
+        yet is already tested elsewhere.
+        """
+        class Process:
+            def __init__(self):
+                self.killed = False
+                self.terminated = False
+            def kill(self):
+                self.killed = True
+            def terminate(self):
+                self.terminated = True
+        process = Process()
+        class ProcInfo:
+            def __init__(self):
+                self.process = process
+                self.pid = 42
+        component = Component('component', self, 'needed', 'Address',
+                              [], ProcInfo)
+        component.start()
+        self.assertTrue(component.running())
+        component.kill()
+        self.assertTrue(process.terminated)
+        self.assertFalse(process.killed)
+        process.terminated = False
+        component.kill(True)
+        self.assertTrue(process.killed)
+        self.assertFalse(process.terminated)
+
    def setuid(self, uid):
        # Stand-in for posix.setuid: just remember the uid that was requested.
        self.__uid_set = uid
+
+    def test_setuid(self):
+        """
+        Some tests around the SetUID pseudo-component.
+        """
+        component = isc.bind10.special_component.SetUID(None, self, 'needed',
+                                                        None)
+        orig_setuid = isc.bind10.special_component.posix.setuid
+        isc.bind10.special_component.posix.setuid = self.setuid
+        component.start()
+        # No uid set in boss, nothing called.
+        self.assertIsNone(self.__uid_set)
+        # Doesn't do anything, but doesn't crash
+        component.stop()
+        component.kill()
+        component.kill(True)
+        self.uid = 42
+        component = isc.bind10.special_component.SetUID(None, self, 'needed',
+                                                        None)
+        component.start()
+        # This time, it get's called
+        self.assertEqual(42, self.__uid_set)
+
class TestComponent(BaseComponent):
    """
    A test component. It starts no real process; instead it records every
    lifecycle event into its owner, so tests can verify the exact order of
    events.
    """
    def __init__(self, owner, name, kind, address=None, params=None):
        """
        Set up the component. The owner is the test case that created it
        and receives the log entries; name tags those entries.
        """
        BaseComponent.__init__(self, owner, kind)
        self.__test = owner
        self.__name = name
        self._address = address
        self._params = params
        # Record creation the same way lifecycle events are recorded
        self.log('init')
        self.log(kind)

    def log(self, event):
        """
        Append (name, event) to the owner's log for later inspection.
        """
        self.__test.log.append((self.__name, event))

    def _start_internal(self):
        self.log('start')

    def _stop_internal(self):
        self.log('stop')

    def _failed_internal(self):
        self.log('failed')

    def kill(self, forceful=False):
        self.log('killed')
+
class FailComponent(BaseComponent):
    """
    A mock component whose startup always blows up with a TestError.
    """
    def __init__(self, name, boss, kind, address=None, params=None):
        # Extra parameters exist only to match the usual component
        # constructor signature; they are ignored.
        BaseComponent.__init__(self, boss, kind)

    def _start_internal(self):
        raise TestError("test error")
+
class ConfiguratorTest(BossUtils, unittest.TestCase):
    """
    Tests for the configurator.

    These tests rely on the order in which dict keys are iterated being
    stable within a run, and on components being started in decreasing
    priority (a missing priority sorts last).
    """
    def setUp(self):
        """
        Prepare some test data for the tests.
        """
        BossUtils.setUp(self)
        # Event log filled by TestComponent instances
        self.log = []
        # The core "hardcoded" configuration
        self.__core = {
            'core1': {
                'priority': 5,
                'process': 'core1',
                'special': 'test',
                'kind': 'core'
            },
            'core2': {
                'process': 'core2',
                'special': 'test',
                'kind': 'core'
            },
            'core3': {
                'process': 'core3',
                'priority': 3,
                'special': 'test',
                'kind': 'core'
            }
        }
        # How they should be started. They are created in the order they are
        # found in the dict, but then they should be started by priority.
        # This expects that the same dict returns its keys in the same order
        # every time
        self.__core_log_create = []
        for core in self.__core.keys():
            self.__core_log_create.append((core, 'init'))
            self.__core_log_create.append((core, 'core'))
        # core1 (prio 5) first, then core3 (prio 3), then core2 (no prio)
        self.__core_log_start = [('core1', 'start'), ('core3', 'start'),
                                 ('core2', 'start')]
        self.__core_log = self.__core_log_create + self.__core_log_start
        # Map the 'test' special to our own factory below
        self.__specials = { 'test': self.__component_test }

    def __component_test(self, process, boss, kind, address=None, params=None):
        """
        Create a test component. It will log events to us.
        """
        self.assertEqual(self, boss)
        return TestComponent(self, process, kind, address, params)

    def test_init(self):
        """
        Tests the configurator can be created and it does not create
        any components yet, nor does it remember anything.
        """
        configurator = Configurator(self, self.__specials)
        self.assertEqual([], self.log)
        self.assertEqual({}, configurator._components)
        self.assertFalse(configurator.running())

    def test_run_plan(self):
        """
        Test the internal function of running plans. Just see it can handle
        the commands in the given order. We see that by the log.

        Also includes one that raises, so we see it just stops there.
        """
        # Prepare the configurator and the plan
        configurator = Configurator(self, self.__specials)
        started = self.__component_test('second', self, 'dispensable')
        started.start()
        stopped = self.__component_test('first', self, 'core')
        configurator._components = {'second': started}
        # A plan is a list of command dicts, executed in order
        plan = [
            {
                'component': stopped,
                'command': 'start',
                'name': 'first',
                'config': {'a': 1}
            },
            {
                'component': started,
                'command': 'stop',
                'name': 'second',
                'config': {}
            },
            {
                # This one raises TestError on start, aborting the plan
                'component': FailComponent('third', self, 'needed'),
                'command': 'start',
                'name': 'third',
                'config': {}
            },
            {
                # Never reached because of the failure above
                'component': self.__component_test('fourth', self, 'core'),
                'command': 'start',
                'name': 'fourth',
                'config': {}
            }
        ]
        # Don't include the preparation into the log
        self.log = []
        # The error from the third component is propagated
        self.assertRaises(TestError, configurator._run_plan, plan)
        # The first two were handled, the rest not, due to the exception
        self.assertEqual([('first', 'start'), ('second', 'stop')], self.log)
        self.assertEqual({'first': ({'a': 1}, stopped)},
                         configurator._components)

    def __build_components(self, config):
        """
        Insert the components into the configuration to specify possible
        Configurator._components.

        Actually, the components are None, but we need something to be there.
        """
        result = {}
        for name in config.keys():
            result[name] = (config[name], None)
        return result

    def test_build_plan(self):
        """
        Test building the plan correctly. Not complete yet, this grows as we
        add more ways of changing the plan.
        """
        configurator = Configurator(self, self.__specials)
        plan = configurator._build_plan({}, self.__core)
        # This should have created the components
        self.assertEqual(self.__core_log_create, self.log)
        self.assertEqual(3, len(plan))
        # Start tasks come out ordered by decreasing priority
        for (task, name) in zip(plan, ['core1', 'core3', 'core2']):
            self.assertTrue('component' in task)
            self.assertEqual('start', task['command'])
            self.assertEqual(name, task['name'])
            component = task['component']
            self.assertIsNone(component._address)
            self.assertIsNone(component._params)

        # A plan to go from older state to newer one containing more components
        bigger = copy.copy(self.__core)
        bigger['additional'] = {
            'priority': 6,
            'special': 'test',
            'process': 'additional',
            'kind': 'needed'
        }
        self.log = []
        plan = configurator._build_plan(self.__build_components(self.__core),
                                        bigger)
        # Only the new component is created and started
        self.assertEqual([('additional', 'init'), ('additional', 'needed')],
                         self.log)
        self.assertEqual(1, len(plan))
        self.assertTrue('component' in plan[0])
        component = plan[0]['component']
        self.assertEqual('start', plan[0]['command'])
        self.assertEqual('additional', plan[0]['name'])

        # Now remove the one component again
        # We run the plan so the component is wired into internal structures
        configurator._run_plan(plan)
        self.log = []
        plan = configurator._build_plan(self.__build_components(bigger),
                                        self.__core)
        # Removal plans nothing to create, just a stop command
        self.assertEqual([], self.log)
        self.assertEqual([{
            'command': 'stop',
            'name': 'additional',
            'component': component
        }], plan)

        # We want to switch a component. So, prepare the configurator so it
        # holds one
        configurator._run_plan(configurator._build_plan(
             self.__build_components(self.__core), bigger))
        # Get a different configuration with a different component
        different = copy.copy(self.__core)
        different['another'] = {
            'special': 'test',
            'process': 'another',
            'kind': 'dispensable'
        }
        self.log = []
        plan = configurator._build_plan(self.__build_components(bigger),
                                        different)
        self.assertEqual([('another', 'init'), ('another', 'dispensable')],
                         self.log)
        # Switching means stopping the old one and starting the new one
        self.assertEqual(2, len(plan))
        self.assertEqual('stop', plan[0]['command'])
        self.assertEqual('additional', plan[0]['name'])
        self.assertTrue('component' in plan[0])
        self.assertEqual('start', plan[1]['command'])
        self.assertEqual('another', plan[1]['name'])
        self.assertTrue('component' in plan[1])

        # Some slightly insane plans, like missing process, having parameters,
        # no special, etc
        plan = configurator._build_plan({}, {
            'component': {
                'kind': 'needed',
                'params': ["1", "2"],
                'address': 'address'
            }
        })
        self.assertEqual(1, len(plan))
        self.assertEqual('start', plan[0]['command'])
        self.assertEqual('component', plan[0]['name'])
        component = plan[0]['component']
        self.assertEqual('component', component.name())
        self.assertEqual(["1", "2"], component._params)
        self.assertEqual('address', component._address)
        self.assertEqual('needed', component._kind)
        # We don't use isinstance on purpose, it would allow a descendant
        self.assertTrue(type(component) is Component)
        plan = configurator._build_plan({}, {
            'component': { 'kind': 'dispensable' }
        })
        self.assertEqual(1, len(plan))
        self.assertEqual('start', plan[0]['command'])
        self.assertEqual('component', plan[0]['name'])
        component = plan[0]['component']
        self.assertEqual('component', component.name())
        self.assertIsNone(component._params)
        self.assertIsNone(component._address)
        self.assertEqual('dispensable', component._kind)

    def __do_switch(self, option, value):
        """
        Start it with some component and then switch the configuration of the
        component. This will probably raise, as it is not yet supported.

        Returns the plan built for the change (when it does not raise).
        """
        configurator = Configurator(self, self.__specials)
        compconfig = {
            'special': 'test',
            'process': 'process',
            'priority': 13,
            'kind': 'core'
        }
        modifiedconfig = copy.copy(compconfig)
        modifiedconfig[option] = value
        return configurator._build_plan({'comp': (compconfig, None)},
                                        {'comp': modifiedconfig})

    def test_change_config_plan(self):
        """
        Test changing a configuration of one component. This is not yet
        implemented and should therefore throw.
        """
        self.assertRaises(NotImplementedError, self.__do_switch, 'kind',
                          'dispensable')
        self.assertRaises(NotImplementedError, self.__do_switch, 'special',
                          'not_a_test')
        self.assertRaises(NotImplementedError, self.__do_switch, 'process',
                          'different')
        self.assertRaises(NotImplementedError, self.__do_switch, 'address',
                          'different')
        self.assertRaises(NotImplementedError, self.__do_switch, 'params',
                          ['different'])
        # This does not change anything on running component, so no need to
        # raise
        self.assertEqual([], self.__do_switch('priority', 5))
        # Check against false positive, if the data are the same, but different
        # instance
        self.assertEqual([], self.__do_switch('special', 'test'))

    def __check_shutdown_log(self):
        """
        Checks the log for shutting down from the core configuration.
        """
        # We know everything must be stopped, we know what it is.
        # But we don't know the order, so we check everything is exactly
        # once in the log
        components = set(self.__core.keys())
        for (name, command) in self.log:
            self.assertEqual('stop', command)
            self.assertTrue(name in components)
            components.remove(name)
        self.assertEqual(set([]), components, "Some component wasn't stopped")

    def test_run(self):
        """
        Passes some configuration to the startup method and sees if
        the components are started up. Then it reconfigures it with
        empty configuration, the original configuration again and shuts
        down.

        It also checks the components are kept inside the configurator.
        """
        configurator = Configurator(self, self.__specials)
        # Can't reconfigure nor stop yet
        self.assertRaises(ValueError, configurator.reconfigure, self.__core)
        self.assertRaises(ValueError, configurator.shutdown)
        self.assertFalse(configurator.running())
        # Start it
        configurator.startup(self.__core)
        self.assertEqual(self.__core_log, self.log)
        for core in self.__core.keys():
            self.assertTrue(core in configurator._components)
            self.assertEqual(self.__core[core],
                             configurator._components[core][0])
        self.assertEqual(set(self.__core), set(configurator._components))
        self.assertTrue(configurator.running())
        # It can't be started twice
        self.assertRaises(ValueError, configurator.startup, self.__core)

        self.log = []
        # Reconfigure - stop everything
        configurator.reconfigure({})
        self.assertEqual({}, configurator._components)
        self.assertTrue(configurator.running())
        self.__check_shutdown_log()

        # Start it again
        self.log = []
        configurator.reconfigure(self.__core)
        self.assertEqual(self.__core_log, self.log)
        for core in self.__core.keys():
            self.assertTrue(core in configurator._components)
            self.assertEqual(self.__core[core],
                             configurator._components[core][0])
        self.assertEqual(set(self.__core), set(configurator._components))
        self.assertTrue(configurator.running())

        # Do a shutdown
        self.log = []
        configurator.shutdown()
        self.assertEqual({}, configurator._components)
        self.assertFalse(configurator.running())
        self.__check_shutdown_log()

        # It can't be stopped twice
        self.assertRaises(ValueError, configurator.shutdown)

    def test_sort_no_prio(self):
        """
        There was a bug if there were two things with the same priority
        (or without priority), it failed as it couldn't compare the dicts
        there. This tests it doesn't crash.
        """
        configurator = Configurator(self, self.__specials)
        configurator._build_plan({}, {
                                         "c1": { 'kind': 'dispensable'},
                                         "c2": { 'kind': 'dispensable'}
                                     })
+
# Run the whole suite when this file is executed directly.
if __name__ == '__main__':
    isc.log.init("bind10") # FIXME Should this be needed?
    isc.log.resetUnitTestRootLogger()
    unittest.main()
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
new file mode 100644
index 0000000..4453184
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -0,0 +1,327 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This test file is generated from .py.in -> .py just so that it lives in the
+# build dir, the same as the rest of the tests. This saves a lot of makefile glue.
+
+"""
+Tests for the bind10.sockcreator module.
+"""
+
+import unittest
+import struct
+import socket
+from isc.net.addr import IPAddr
+import isc.log
+from libutil_io_python import send_fd
+from isc.bind10.sockcreator import Parser, CreatorError, WrappedSocket
+
class FakeCreator:
    """
    Class emulating the socket to the socket creator. It can be given expected
    data to receive (and check) and responses to give to the Parser class
    during testing.
    """

    class InvalidPlan(Exception):
        """
        Raised when someone wants to recv when sending is planned or vice
        versa.
        """
        pass

    class InvalidData(Exception):
        """
        Raised when the data passed to sendall are not the same as expected.
        """
        pass

    def __init__(self, plan):
        """
        Create the object. The plan variable contains a list of expected
        actions, in the form:

        [('r', 'Data to return from recv'), ('s', 'Data expected on sendall'),
            ('f', 'File descriptor number to return from read_fd'),
            ('e', None), ...]

        An 'e' entry makes the next call raise a socket.error. The plan list
        is modified (consumed) as the calls come in.
        """
        self.__plan = plan

    def __get_plan(self, expected):
        """
        Return the payload of the head of the plan after checking its kind.
        Raises InvalidPlan when the plan is exhausted or the kinds mismatch,
        and socket.error for a planned 'e' entry.
        """
        if len(self.__plan) == 0:
            # BUG FIX: was a bare `InvalidPlan`, but the exception is only a
            # class attribute, so that raised NameError instead.
            raise self.InvalidPlan('Nothing more planned')
        (kind, data) = self.__plan[0]
        if kind == 'e':
            self.__plan.pop(0)
            raise socket.error('False socket error')
        if kind != expected:
            # BUG FIX: same scoping problem as above.
            raise self.InvalidPlan('Planned ' + kind + ', but ' + expected +
                'requested')
        return data

    def recv(self, maxsize):
        """
        Emulate recv. Returns up to maxsize bytes from the current recv plan.
        If there are data left over from a previous recv call, they are used
        first.

        If no recv is planned, raises InvalidPlan.
        """
        data = self.__get_plan('r')
        result, rest = data[:maxsize], data[maxsize:]
        if len(rest) > 0:
            # Keep the remainder for the next recv call
            self.__plan[0] = ('r', rest)
        else:
            self.__plan.pop(0)
        return result

    def read_fd(self):
        """
        Emulate the reading of a file descriptor. Returns one from the plan.

        It raises InvalidPlan if no fd handover is planned now.
        """
        fd = self.__get_plan('f')
        self.__plan.pop(0)
        return fd

    def sendall(self, data):
        """
        Checks that the data passed are correct according to the plan. It
        raises InvalidData if the data differ or InvalidPlan when sendall is
        not expected.
        """
        planned = self.__get_plan('s')
        dlen = len(data)
        prefix, rest = planned[:dlen], planned[dlen:]
        if prefix != data:
            # BUG FIX: was a bare `InvalidData` (NameError at runtime).
            raise self.InvalidData('Expected "' + str(prefix) + '", got "' +
                str(data) + '"')
        if len(rest) > 0:
            # Only part of the planned data arrived; keep the rest expected
            self.__plan[0] = ('s', rest)
        else:
            self.__plan.pop(0)

    def all_used(self):
        """
        Returns True once the whole plan was consumed.
        """
        return len(self.__plan) == 0
+
+class ParserTests(unittest.TestCase):
+    """
+    Testcases for the Parser class.
+
+    A lot of these tests could be done by
+    `with self.assertRaises(CreatorError) as cm`. But some versions of python
+    take the scope wrong and don't work, so we use the primitive way of
+    try-except.
+    """
+    def __terminate(self):
+        creator = FakeCreator([('s', b'T'), ('r', b'')])
+        parser = Parser(creator)
+        self.assertEqual(None, parser.terminate())
+        self.assertTrue(creator.all_used())
+        return parser
+
+    def test_terminate(self):
+        """
+        Test if the command to terminate is correct and it waits for reading the
+        EOF.
+        """
+        self.__terminate()
+
+    def __terminate_raises(self, parser):
+        """
+        Check that terminate() raises a fatal exception.
+        """
+        try:
+            parser.terminate()
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_terminate_error1(self):
+        """
+        Test it reports an exception when there's error terminating the creator.
+        This one raises an error when receiving the EOF.
+        """
+        creator = FakeCreator([('s', b'T'), ('e', None)])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_error2(self):
+        """
+        Test it reports an exception when there's error terminating the creator.
+        This one raises an error when sending data.
+        """
+        creator = FakeCreator([('e', None)])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_error3(self):
+        """
+        Test it reports an exception when there's error terminating the creator.
+        This one sends data when it should have terminated.
+        """
+        creator = FakeCreator([('s', b'T'), ('r', b'Extra data')])
+        parser = Parser(creator)
+        self.__terminate_raises(parser)
+
+    def test_terminate_twice(self):
+        """
+        Test we can't terminate twice.
+        """
+        parser = self.__terminate()
+        self.__terminate_raises(parser)
+
+    def test_crash(self):
+        """
+        Tests that the parser correctly raises exception when it crashes
+        unexpectedly.
+        """
+        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'')])
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            # Is the exception correct?
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_error(self):
+        """
+        Tests that the parser correctly raises non-fatal exception when
+        the socket can not be created.
+        """
+        # We split the int to see if it can cope with data coming in
+        # different packets
+        intpart = struct.pack('@i', 42)
+        creator = FakeCreator([('s', b'SU4\0\0\0\0\0\0'), ('r', b'ES' +
+            intpart[:1]), ('r', intpart[1:])])
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            # Is the exception correct?
+            self.assertFalse(ce.fatal)
+            self.assertEqual(42, ce.errno)
+
+    def __error(self, plan):
+        creator = FakeCreator(plan)
+        parser = Parser(creator)
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, socket.SOCK_DGRAM)
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(creator.all_used())
+            self.assertTrue(ce.fatal)
+
+    def test_error_send(self):
+        self.__error([('e', None)])
+
+    def test_error_recv(self):
+        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('e', None)])
+
+    def test_error_read_fd(self):
+        self.__error([('s', b'SU4\0\0\0\0\0\0'), ('r', b'S'), ('e', None)])
+
+    def __create(self, addr, socktype, encoded):
+        creator = FakeCreator([('s', b'S' + encoded), ('r', b'S'), ('f', 42)])
+        parser = Parser(creator)
+        self.assertEqual(42, parser.get_socket(IPAddr(addr), 42, socktype))
+
+    def test_create1(self):
+        self.__create('192.0.2.0', 'UDP', b'U4\0\x2A\xC0\0\x02\0')
+
+    def test_create2(self):
+        self.__create('2001:db8::', socket.SOCK_STREAM,
+            b'T6\0\x2A\x20\x01\x0d\xb8\0\0\0\0\0\0\0\0\0\0\0\0')
+
+    def test_create_terminated(self):
+        """
+        Test we can't request sockets after it was terminated.
+        """
+        parser = self.__terminate()
+        try:
+            parser.get_socket(IPAddr('0.0.0.0'), 0, 'UDP')
+            self.fail("Not raised")
+        except CreatorError as ce:
+            self.assertTrue(ce.fatal)
+            self.assertEqual(None, ce.errno)
+
+    def test_invalid_socktype(self):
+        """
+        Test invalid socket type is rejected
+        """
+        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+                          IPAddr('0.0.0.0'), 42, 'RAW')
+
+    def test_invalid_family(self):
+        """
+        Test it rejects invalid address family.
+        """
+        # Note: this produces a bad logger output, since this address
+        # can not be converted to string, so the original message with
+        # placeholders is output. This should not happen in practice, so
+        # it is harmless.
+        addr = IPAddr('0.0.0.0')
+        addr.family = 42
+        self.assertRaises(ValueError, Parser(FakeCreator([])).get_socket,
+                          addr, 42, socket.SOCK_DGRAM)
+
+class WrapTests(unittest.TestCase):
+    """
+    Tests for the wrap_socket function.
+    """
+    def test_wrap(self):
+        # We construct two pairs of socket. The receiving side of one pair will
+        # be wrapped. Then we send one of the other pair through this pair and
+        # check the received one can be used as a socket
+
+        # The transport socket
+        (t1, t2) = socket.socketpair()
+        # The payload socket
+        (p1, p2) = socket.socketpair()
+
+        t2 = WrappedSocket(t2)
+
+        # Transfer the descriptor
+        send_fd(t1.fileno(), p1.fileno())
+        p1 = socket.fromfd(t2.read_fd(), socket.AF_UNIX, socket.SOCK_STREAM)
+
+        # Now, pass some data through the socket
+        p1.send(b'A')
+        data = p2.recv(1)
+        self.assertEqual(b'A', data)
+
+        # Test the wrapping didn't hurt the socket's usual methods
+        t1.send(b'B')
+        data = t2.recv(1)
+        self.assertEqual(b'B', data)
+        t2.send(b'C')
+        data = t1.recv(1)
+        self.assertEqual(b'C', data)
+
+if __name__ == '__main__':
+    isc.log.init("bind10") # FIXME Should this be needed?
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()
diff --git a/src/lib/python/isc/cc/data.py b/src/lib/python/isc/cc/data.py
index ce1bba0..76ef942 100644
--- a/src/lib/python/isc/cc/data.py
+++ b/src/lib/python/isc/cc/data.py
@@ -22,8 +22,22 @@
 
 import json
 
-class DataNotFoundError(Exception): pass
-class DataTypeError(Exception): pass
+class DataNotFoundError(Exception):
+    """Raised if an identifier does not exist according to a spec file,
+       or if an item is addressed that is not in the current (or default)
+       config (such as a nonexistent list or map element)"""
+    pass
+
+class DataAlreadyPresentError(Exception):
+    """Raised if there is an attempt to add an element to a list or a
+       map that is already present in that list or map (i.e. if 'add'
+       is used when it should be 'set')"""
+    pass
+
+class DataTypeError(Exception):
+    """Raised if there is an attempt to set an element that is of a
+       different type than the type specified in the specification."""
+    pass
 
 def remove_identical(a, b):
     """Removes the values from dict a that are the same as in dict b.
diff --git a/src/lib/python/isc/cc/tests/Makefile.am b/src/lib/python/isc/cc/tests/Makefile.am
index 4e49501..4c2acc0 100644
--- a/src/lib/python/isc/cc/tests/Makefile.am
+++ b/src/lib/python/isc/cc/tests/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST += test_session.py
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -23,7 +23,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python \
+	PYTHONPATH=$(COMMON_PYTHON_PATH) \
 	BIND10_TEST_SOCKET_FILE=$(builddir)/test_socket.sock \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/config/Makefile.am b/src/lib/python/isc/config/Makefile.am
index 312ad33..ef696fb 100644
--- a/src/lib/python/isc/config/Makefile.am
+++ b/src/lib/python/isc/config/Makefile.am
@@ -1,27 +1,31 @@
 SUBDIRS = . tests
 
 python_PYTHON = __init__.py ccsession.py cfgmgr.py config_data.py module_spec.py
-pyexec_DATA = cfgmgr_messages.py $(top_builddir)/src/lib/python/config_messages.py
-
 pythondir = $(pyexecdir)/isc/config
 
-# Define rule to build logging source files from message file
-cfgmgr_messages.py: cfgmgr_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message \
-	-p $(top_srcdir)/src/lib/python/isc/config/cfgmgr_messages.mes
-
-$(top_builddir)/src/lib/python/config_messages.py: config_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message \
-		-p -d $(top_builddir)/src/lib/python \
-		$(top_srcdir)/src/lib/python/isc/config/config_messages.mes
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+BUILT_SOURCES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+nodist_pylogmessage_PYTHON += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
 
-CLEANFILES =  cfgmgr_messages.py cfgmgr_messages.pyc
-CLEANFILES += $(top_builddir)/src/lib/python/config_messages.py
-CLEANFILES += $(top_builddir)/src/lib/python/config_messages.pyc
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.pyc
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/config_messages.pyc
 
 CLEANDIRS = __pycache__
 
 EXTRA_DIST = cfgmgr_messages.mes config_messages.mes
 
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/cfgmgr_messages.py : cfgmgr_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/cfgmgr_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/config_messages.py : config_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/config_messages.mes
+
 clean-local:
 	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/config/ccsession.py b/src/lib/python/isc/config/ccsession.py
index 06a7f0f..2d998ce 100644
--- a/src/lib/python/isc/config/ccsession.py
+++ b/src/lib/python/isc/config/ccsession.py
@@ -43,7 +43,7 @@ from isc.util.file import path_search
 import bind10_config
 from isc.log import log_config_update
 import json
-from config_messages import *
+from isc.log_messages.config_messages import *
 
 logger = isc.log.Logger("config")
 
@@ -91,6 +91,7 @@ COMMAND_CONFIG_UPDATE = "config_update"
 COMMAND_MODULE_SPECIFICATION_UPDATE = "module_specification_update"
 
 COMMAND_GET_COMMANDS_SPEC = "get_commands_spec"
+COMMAND_GET_STATISTICS_SPEC = "get_statistics_spec"
 COMMAND_GET_CONFIG = "get_config"
 COMMAND_SET_CONFIG = "set_config"
 COMMAND_GET_MODULE_SPEC = "get_module_spec"
@@ -142,7 +143,9 @@ class ModuleCCSession(ConfigData):
        callbacks are called when 'check_command' is called on the
        ModuleCCSession"""
        
-    def __init__(self, spec_file_name, config_handler, command_handler, cc_session=None, handle_logging_config=True):
+    def __init__(self, spec_file_name, config_handler, command_handler,
+                 cc_session=None, handle_logging_config=True,
+                 socket_file = None):
         """Initialize a ModuleCCSession. This does *NOT* send the
            specification and request the configuration yet. Use start()
            for that once the ModuleCCSession has been initialized.
@@ -164,6 +167,12 @@ class ModuleCCSession(ConfigData):
            logger manager when the logging configuration gets updated.
           The module does not need to do anything except initializing
            its loggers, and provide log messages. Defaults to true.
+
+           socket_file: If cc_session was none, this optional argument
+           specifies which socket file to use to connect to msgq. It
+           will be overridden by the environment variable
+           MSGQ_SOCKET_FILE. If none, and no environment variable is
+           set, it will use the system default.
         """
         module_spec = isc.config.module_spec_from_file(spec_file_name)
         ConfigData.__init__(self, module_spec)
@@ -174,7 +183,7 @@ class ModuleCCSession(ConfigData):
         self.set_command_handler(command_handler)
 
         if not cc_session:
-            self._session = Session()
+            self._session = Session(socket_file)
         else:
             self._session = cc_session
         self._session.group_subscribe(self._module_name, "*")
@@ -312,7 +321,7 @@ class ModuleCCSession(ConfigData):
         module_spec = isc.config.module_spec_from_file(spec_file_name)
         module_cfg = ConfigData(module_spec)
         module_name = module_spec.get_module_name()
-        self._session.group_subscribe(module_name);
+        self._session.group_subscribe(module_name)
 
         # Get the current config for that module now
         seq = self._session.group_sendmsg(create_command(COMMAND_GET_CONFIG, { "module_name": module_name }), "ConfigManager")
@@ -327,7 +336,7 @@ class ModuleCCSession(ConfigData):
             rcode, value = parse_answer(answer)
             if rcode == 0:
                 if value != None and module_spec.validate_config(False, value):
-                    module_cfg.set_local_config(value);
+                    module_cfg.set_local_config(value)
                     if config_update_callback is not None:
                         config_update_callback(value, module_cfg)
 
@@ -377,7 +386,7 @@ class ModuleCCSession(ConfigData):
                         if self.get_module_spec().validate_config(False,
                                                                   value,
                                                                   errors):
-                            self.set_local_config(value);
+                            self.set_local_config(value)
                             if self._config_handler:
                                 self._config_handler(value)
                         else:
@@ -414,8 +423,8 @@ class UIModuleCCSession(MultiConfigData):
             self.set_specification(isc.config.ModuleSpec(specs[module]))
 
     def update_specs_and_config(self):
-        self.request_specifications();
-        self.request_current_config();
+        self.request_specifications()
+        self.request_current_config()
 
     def request_current_config(self):
         """Requests the current configuration from the configuration
@@ -425,65 +434,144 @@ class UIModuleCCSession(MultiConfigData):
             raise ModuleCCSessionError("Bad config version")
         self._set_current_config(config)
 
-
-    def add_value(self, identifier, value_str = None):
-        """Add a value to a configuration list. Raises a DataTypeError
-           if the value does not conform to the list_item_spec field
-           of the module config data specification. If value_str is
-           not given, we add the default as specified by the .spec
-           file."""
-        module_spec = self.find_spec_part(identifier)
-        if (type(module_spec) != dict or "list_item_spec" not in module_spec):
-            raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
-
+    def _add_value_to_list(self, identifier, value, module_spec):
         cur_list, status = self.get_value(identifier)
         if not cur_list:
             cur_list = []
 
-        # Hmm. Do we need to check for duplicates?
-        value = None
-        if value_str is not None:
-            value = isc.cc.data.parse_value_str(value_str)
-        else:
+        if value is None:
             if "item_default" in module_spec["list_item_spec"]:
                 value = module_spec["list_item_spec"]["item_default"]
 
         if value is None:
-            raise isc.cc.data.DataNotFoundError("No value given and no default for " + str(identifier))
-            
+            raise isc.cc.data.DataNotFoundError(
+                "No value given and no default for " + str(identifier))
+
         if value not in cur_list:
             cur_list.append(value)
             self.set_value(identifier, cur_list)
+        else:
+            raise isc.cc.data.DataAlreadyPresentError(value +
+                                                      " already in "
+                                                      + identifier)
+
+    def _add_value_to_named_set(self, identifier, value, item_value):
+        if type(value) != str:
+            raise isc.cc.data.DataTypeError("Name for named_set " +
+                                            identifier +
+                                            " must be a string")
+        # fail on both None and empty string
+        if not value:
+            raise isc.cc.data.DataNotFoundError(
+                    "Need a name to add a new item to named_set " +
+                    str(identifier))
+        else:
+            cur_map, status = self.get_value(identifier)
+            if not cur_map:
+                cur_map = {}
+            if value not in cur_map:
+                cur_map[value] = item_value
+                self.set_value(identifier, cur_map)
+            else:
+                raise isc.cc.data.DataAlreadyPresentError(value +
+                                                          " already in "
+                                                          + identifier)
 
-    def remove_value(self, identifier, value_str):
-        """Remove a value from a configuration list. The value string
-           must be a string representation of the full item. Raises
-           a DataTypeError if the value at the identifier is not a list,
-           or if the given value_str does not match the list_item_spec
-           """
+    def add_value(self, identifier, value_str = None, set_value_str = None):
+        """Add a value to a configuration list. Raises a DataTypeError
+           if the value does not conform to the list_item_spec field
+           of the module config data specification. If value_str is
+           not given, we add the default as specified by the .spec
+           file. Raises a DataNotFoundError if the given identifier
+           is not specified in the specification as a map or list.
+           Raises a DataAlreadyPresentError if the specified element
+           already exists."""
         module_spec = self.find_spec_part(identifier)
-        if (type(module_spec) != dict or "list_item_spec" not in module_spec):
-            raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list")
+        if module_spec is None:
+            raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+        # the specified element must be a list or a named_set
+        if 'list_item_spec' in module_spec:
+            value = None
+            # in lists, we might get the value with spaces, making it
+            # the third argument. In that case we interpret both as
+            # one big string meant as the value
+            if value_str is not None:
+                if set_value_str is not None:
+                    value_str += set_value_str
+                value = isc.cc.data.parse_value_str(value_str)
+            self._add_value_to_list(identifier, value, module_spec)
+        elif 'named_set_item_spec' in module_spec:
+            item_name = None
+            item_value = None
+            if value_str is not None:
+                item_name =  isc.cc.data.parse_value_str(value_str)
+            if set_value_str is not None:
+                item_value = isc.cc.data.parse_value_str(set_value_str)
+            else:
+                if 'item_default' in module_spec['named_set_item_spec']:
+                    item_value = module_spec['named_set_item_spec']['item_default']
+            self._add_value_to_named_set(identifier, item_name,
+                                         item_value)
+        else:
+            raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named set")
 
-        if value_str is None:
-            # we are directly removing an list index
+    def _remove_value_from_list(self, identifier, value):
+        if value is None:
+            # we are directly removing a list index
             id, list_indices = isc.cc.data.split_identifier_list_indices(identifier)
             if list_indices is None:
-                raise DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
+                raise isc.cc.data.DataTypeError("identifier in remove_value() does not contain a list index, and no value to remove")
             else:
                 self.set_value(identifier, None)
         else:
-            value = isc.cc.data.parse_value_str(value_str)
-            isc.config.config_data.check_type(module_spec, [value])
             cur_list, status = self.get_value(identifier)
-            #if not cur_list:
-            #    cur_list = isc.cc.data.find_no_exc(self.config.data, identifier)
             if not cur_list:
                 cur_list = []
-            if value in cur_list:
+            elif value in cur_list:
                 cur_list.remove(value)
             self.set_value(identifier, cur_list)
 
+    def _remove_value_from_named_set(self, identifier, value):
+        if value is None:
+            raise isc.cc.data.DataNotFoundError("Need a name to remove an item from named_set " + str(identifier))
+        elif type(value) != str:
+            raise isc.cc.data.DataTypeError("Name for named_set " + identifier + " must be a string")
+        else:
+            cur_map, status = self.get_value(identifier)
+            if not cur_map:
+                cur_map = {}
+            if value in cur_map:
+                del cur_map[value]
+                self.set_value(identifier, cur_map)
+            else:
+                raise isc.cc.data.DataNotFoundError(value + " not found in named_set " + str(identifier))
+
+    def remove_value(self, identifier, value_str):
+        """Remove a value from a configuration list or named set.
+        The value string must be a string representation of the full
+        item. Raises a DataTypeError if the value at the identifier
+        is not a list, or if the given value_str does not match the
+        list_item_spec """
+        module_spec = self.find_spec_part(identifier)
+        if module_spec is None:
+            raise isc.cc.data.DataNotFoundError("Unknown item " + str(identifier))
+
+        value = None
+        if value_str is not None:
+            value = isc.cc.data.parse_value_str(value_str)
+
+        if 'list_item_spec' in module_spec:
+            if value is not None:
+                isc.config.config_data.check_type(module_spec['list_item_spec'], value)
+            self._remove_value_from_list(identifier, value)
+        elif 'named_set_item_spec' in module_spec:
+            self._remove_value_from_named_set(identifier, value)
+        else:
+            raise isc.cc.data.DataNotFoundError(str(identifier) + " is not a list or a named_set")
+
+
+
     def commit(self):
         """Commit all local changes, send them through b10-cmdctl to
            the configuration manager"""
diff --git a/src/lib/python/isc/config/cfgmgr.py b/src/lib/python/isc/config/cfgmgr.py
index 18e001c..4d568be 100644
--- a/src/lib/python/isc/config/cfgmgr.py
+++ b/src/lib/python/isc/config/cfgmgr.py
@@ -32,7 +32,7 @@ from isc.config import ccsession, config_data, module_spec
 from isc.util.file import path_search
 import bind10_config
 import isc.log
-from cfgmgr_messages import *
+from isc.log_messages.cfgmgr_messages import *
 
 logger = isc.log.Logger("cfgmgr")
 
@@ -117,12 +117,13 @@ class ConfigManagerData:
             if file:
                 file.close();
         return config
-        
+
     def write_to_file(self, output_file_name = None):
         """Writes the current configuration data to a file. If
            output_file_name is not specified, the file used in
            read_from_file is used."""
         filename = None
+
         try:
             file = tempfile.NamedTemporaryFile(mode='w',
                                                prefix="b10-config.db.",
@@ -202,7 +203,7 @@ class ConfigManager:
 
     def notify_boss(self):
         """Notifies the Boss module that the Config Manager is running"""
-        self.cc.group_sendmsg({"running": "configmanager"}, "Boss")
+        self.cc.group_sendmsg({"running": "ConfigManager"}, "Boss")
 
     def set_module_spec(self, spec):
         """Adds a ModuleSpec"""
@@ -267,6 +268,19 @@ class ConfigManager:
                 commands[module_name] = self.module_specs[module_name].get_commands_spec()
         return commands
 
+    def get_statistics_spec(self, name = None):
+        """Returns a dict containing 'module_name': statistics_spec for
+           all modules. If name is specified, only that module will
+           be included"""
+        statistics = {}
+        if name:
+            if name in self.module_specs:
+                statistics[name] = self.module_specs[name].get_statistics_spec()
+        else:
+            for module_name in self.module_specs.keys():
+                statistics[module_name] = self.module_specs[module_name].get_statistics_spec()
+        return statistics
+
     def read_config(self):
         """Read the current configuration from the file specified at init()"""
         try:
@@ -278,7 +292,7 @@ class ConfigManager:
             # ok, just start with an empty config
             self.config = ConfigManagerData(self.data_path,
                                             self.database_filename)
-        
+
     def write_config(self):
         """Write the current configuration to the file specified at init()"""
         self.config.write_to_file()
@@ -432,7 +446,7 @@ class ConfigManager:
             answer = ccsession.create_answer(1, "Wrong number of arguments")
         if not answer:
             answer = ccsession.create_answer(1, "No answer message from " + cmd[0])
-            
+
         return answer
 
     def _handle_module_spec(self, spec):
@@ -442,7 +456,7 @@ class ConfigManager:
         # todo: error checking (like keyerrors)
         answer = {}
         self.set_module_spec(spec)
-        
+
         # We should make one general 'spec update for module' that
         # passes both specification and commands at once
         spec_update = ccsession.create_command(ccsession.COMMAND_MODULE_SPECIFICATION_UPDATE,
@@ -457,6 +471,8 @@ class ConfigManager:
         if cmd:
             if cmd == ccsession.COMMAND_GET_COMMANDS_SPEC:
                 answer = ccsession.create_answer(0, self.get_commands_spec())
+            elif cmd == ccsession.COMMAND_GET_STATISTICS_SPEC:
+                answer = ccsession.create_answer(0, self.get_statistics_spec())
             elif cmd == ccsession.COMMAND_GET_MODULE_SPEC:
                 answer = self._handle_get_module_spec(arg)
             elif cmd == ccsession.COMMAND_GET_CONFIG:
@@ -476,7 +492,7 @@ class ConfigManager:
         else:
             answer = ccsession.create_answer(1, "Unknown message format: " + str(msg))
         return answer
-        
+
     def run(self):
         """Runs the configuration manager."""
         self.running = True
diff --git a/src/lib/python/isc/config/config_data.py b/src/lib/python/isc/config/config_data.py
index 1efe4a9..b2cf048 100644
--- a/src/lib/python/isc/config/config_data.py
+++ b/src/lib/python/isc/config/config_data.py
@@ -145,6 +145,8 @@ def _find_spec_part_single(cur_spec, id_part):
             return cur_spec['list_item_spec']
         # not found
         raise isc.cc.data.DataNotFoundError(id + " not found")
+    elif type(cur_spec) == dict and 'named_set_item_spec' in cur_spec.keys():
+        return cur_spec['named_set_item_spec']
     elif type(cur_spec) == list:
         for cur_spec_item in cur_spec:
             if cur_spec_item['item_name'] == id:
@@ -191,11 +193,14 @@ def spec_name_list(spec, prefix="", recurse=False):
                     result.extend(spec_name_list(map_el['map_item_spec'], prefix + map_el['item_name'], recurse))
                 else:
                     result.append(prefix + name)
+        elif 'named_set_item_spec' in spec:
+            # we added a '/' above, but in this one case we don't want it
+            result.append(prefix[:-1])
         else:
             for name in spec:
                 result.append(prefix + name + "/")
                 if recurse:
-                    result.extend(spec_name_list(spec[name],name, recurse))
+                    result.extend(spec_name_list(spec[name], name, recurse))
     elif type(spec) == list:
         for list_el in spec:
             if 'item_name' in list_el:
@@ -207,7 +212,7 @@ def spec_name_list(spec, prefix="", recurse=False):
             else:
                 raise ConfigDataError("Bad specification")
     else:
-        raise ConfigDataError("Bad specication")
+        raise ConfigDataError("Bad specification")
     return result
 
 class ConfigData:
@@ -255,7 +260,7 @@ class ConfigData:
 
     def get_local_config(self):
         """Returns the non-default config values in a dict"""
-        return self.data;
+        return self.data
 
     def get_item_list(self, identifier = None, recurse = False):
         """Returns a list of strings containing the full identifiers of
@@ -412,7 +417,39 @@ class MultiConfigData:
                 item_id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
                 id_list = module + "/" + id_prefix + "/" + item_id
                 id_prefix += "/" + id_part
-                if list_indices is not None:
+                part_spec = find_spec_part(self._specifications[module].get_config_spec(), id_prefix)
+                if part_spec['item_type'] == 'named_set':
+                    # For named sets, the identifier is partly defined
+                    # by which values are actually present, and not
+                    # purely by the specification.
+                    # So if there is a part of the identifier left,
+                    # we need to look up the value, then see if that
+                    # contains the next part of the identifier we got
+                    if len(id_parts) == 0:
+                        if 'item_default' in part_spec:
+                            return part_spec['item_default']
+                        else:
+                            return None
+                    id_part = id_parts.pop(0)
+
+                    named_set_value, type = self.get_value(id_list)
+                    if id_part in named_set_value:
+                        if len(id_parts) > 0:
+                            # we are looking for the *default* value.
+                            # so if not present in here, we need to
+                            # lookup the one from the spec
+                            rest_of_id = "/".join(id_parts)
+                            result = isc.cc.data.find_no_exc(named_set_value[id_part], rest_of_id)
+                            if result is None:
+                                spec_part = self.find_spec_part(identifier)
+                                if 'item_default' in spec_part:
+                                    return spec_part['item_default']
+                            return result
+                        else:
+                            return named_set_value[id_part]
+                    else:
+                        return None
+                elif list_indices is not None:
                     # there's actually two kinds of default here for
                     # lists; they can have a default value (like an
                     # empty list), but their elements can  also have
@@ -449,7 +486,12 @@ class MultiConfigData:
                     
             spec = find_spec_part(self._specifications[module].get_config_spec(), id)
             if 'item_default' in spec:
-                return spec['item_default']
+                # one special case, named_set
+                if spec['item_type'] == 'named_set':
+                    print("is " + id_part + " in named set?")
+                    return spec['item_default']
+                else:
+                    return spec['item_default']
             else:
                 return None
 
@@ -473,7 +515,7 @@ class MultiConfigData:
             return value, self.CURRENT
         if default:
             value = self.get_default_value(identifier)
-            if value != None:
+            if value is not None:
                 return value, self.DEFAULT
         return None, self.NONE
 
@@ -493,7 +535,7 @@ class MultiConfigData:
                 spec_part_list = spec_part['list_item_spec']
                 list_value, status = self.get_value(identifier)
                 if list_value is None:
-                    raise isc.cc.data.DataNotFoundError(identifier)
+                    raise isc.cc.data.DataNotFoundError(identifier + " not found")
 
                 if type(list_value) != list:
                     # the identifier specified a single element
@@ -509,12 +551,38 @@ class MultiConfigData:
                         for i in range(len(list_value)):
                             self._append_value_item(result, spec_part_list, "%s[%d]" % (identifier, i), all)
             elif item_type == "map":
+                value, status = self.get_value(identifier)
                 # just show the specific contents of a map, we are
                 # almost never interested in just its name
                 spec_part_map = spec_part['map_item_spec']
                 self._append_value_item(result, spec_part_map, identifier, all)
+            elif item_type == "named_set":
+                value, status = self.get_value(identifier)
+
+                # show just the one entry, when either the map is empty,
+                # or when this element is not requested specifically
+                if len(value.keys()) == 0:
+                    entry = _create_value_map_entry(identifier,
+                                                    item_type,
+                                                    {}, status)
+                    result.append(entry)
+                elif not first and not all:
+                    entry = _create_value_map_entry(identifier,
+                                                    item_type,
+                                                    None, status)
+                    result.append(entry)
+                else:
+                    spec_part_named_set = spec_part['named_set_item_spec']
+                    for entry in value:
+                        self._append_value_item(result,
+                                                spec_part_named_set,
+                                                identifier + "/" + entry,
+                                                all)
             else:
                 value, status = self.get_value(identifier)
+                if status == self.NONE and not spec_part['item_optional']:
+                    raise isc.cc.data.DataNotFoundError(identifier + " not found")
+
                 entry = _create_value_map_entry(identifier,
                                                 item_type,
                                                 value, status)
@@ -569,7 +637,7 @@ class MultiConfigData:
                     spec_part = spec_part['list_item_spec']
                 check_type(spec_part, value)
         else:
-            raise isc.cc.data.DataNotFoundError(identifier)
+            raise isc.cc.data.DataNotFoundError(identifier + " not found")
 
         # Since we do not support list diffs (yet?), we need to
         # copy the currently set list of items to _local_changes
@@ -579,15 +647,54 @@ class MultiConfigData:
         cur_id_part = '/'
         for id_part in id_parts:
             id, list_indices = isc.cc.data.split_identifier_list_indices(id_part)
+            cur_value, status = self.get_value(cur_id_part + id)
+            # Check if the value was there in the first place
+            # If we are at the final element, we do not care whether we found
+            # it, since if we have reached this point and it did not exist,
+            # it was apparently an optional value without a default.
+            if status == MultiConfigData.NONE and cur_id_part != "/" and\
+               cur_id_part + id != identifier:
+                raise isc.cc.data.DataNotFoundError(id_part +
+                                                    " not found in " +
+                                                    cur_id_part)
             if list_indices is not None:
-                cur_list, status = self.get_value(cur_id_part + id)
+                # And check if we don't set something outside of any
+                # list
+                cur_list = cur_value
+                for list_index in list_indices:
+                    if list_index >= len(cur_list):
+                        raise isc.cc.data.DataNotFoundError("No item " +
+                                  str(list_index) + " in " + id_part)
+                    else:
+                        cur_list = cur_list[list_index]
                 if status != MultiConfigData.LOCAL:
                     isc.cc.data.set(self._local_changes,
                                     cur_id_part + id,
-                                    cur_list)
+                                    cur_value)
             cur_id_part = cur_id_part + id_part + "/"
         isc.cc.data.set(self._local_changes, identifier, value)
- 
+
+    def _get_list_items(self, item_name):
+        """This method is used in get_config_item_list, to add list
+           indices and named_set names to the completion list. If
+           the given item_name is for a list or named_set, it'll
+           return a list of those (appended to item_name), otherwise
+           the list will only contain the item_name itself."""
+        spec_part = self.find_spec_part(item_name)
+        if 'item_type' in spec_part and \
+           spec_part['item_type'] == 'named_set':
+            subslash = ""
+            if spec_part['named_set_item_spec']['item_type'] == 'map' or\
+               spec_part['named_set_item_spec']['item_type'] == 'named_set':
+                subslash = "/"
+            values, status = self.get_value(item_name)
+            if len(values) > 0:
+                return [ item_name + "/" + v + subslash for v in values.keys() ]
+            else:
+                return [ item_name ]
+        else:
+            return [ item_name ]
+
     def get_config_item_list(self, identifier = None, recurse = False):
         """Returns a list of strings containing the item_names of
            the child items at the given identifier. If no identifier is
@@ -598,7 +705,11 @@ class MultiConfigData:
             if identifier.startswith("/"):
                 identifier = identifier[1:]
             spec = self.find_spec_part(identifier)
-            return spec_name_list(spec, identifier + "/", recurse)
+            spec_list = spec_name_list(spec, identifier + "/", recurse)
+            result_list = []
+            for spec_name in spec_list:
+                result_list.extend(self._get_list_items(spec_name))
+            return result_list
         else:
             if recurse:
                 id_list = []
diff --git a/src/lib/python/isc/config/module_spec.py b/src/lib/python/isc/config/module_spec.py
index 6171149..b79f928 100644
--- a/src/lib/python/isc/config/module_spec.py
+++ b/src/lib/python/isc/config/module_spec.py
@@ -23,6 +23,7 @@
 
 import json
 import sys
+import time
 
 import isc.cc.data
 
@@ -91,7 +92,7 @@ class ModuleSpec:
             return _validate_spec_list(data_def, full, data, errors)
         else:
             # no spec, always bad
-            if errors != None:
+            if errors is not None:
                 errors.append("No config_data specification")
             return False
 
@@ -117,6 +118,26 @@ class ModuleSpec:
 
         return False
 
+    def validate_statistics(self, full, stat, errors = None):
+        """Check whether the given piece of data conforms to this
+           data definition. If so, it returns True. If not, it will
+           return False. If errors is given, and is an array, a string
+           describing the error will be appended to it. The current
+           version stops as soon as there is one error so this list
+           will not be exhaustive. If 'full' is true, it also errors on
+           non-optional missing values. Set this to False if you want to
+           validate only a part of a statistics tree (like a list of
+           non-default values). Also it checks 'item_format' in case
+           of time"""
+        stat_spec = self.get_statistics_spec()
+        if stat_spec is not None:
+            return _validate_spec_list(stat_spec, full, stat, errors)
+        else:
+            # no spec, always bad
+            if errors is not None:
+                errors.append("No statistics specification")
+            return False
+
     def get_module_name(self):
         """Returns a string containing the name of the module as
            specified by the specification given at __init__()"""
@@ -152,6 +173,14 @@ class ModuleSpec:
         else:
             return None
     
+    def get_statistics_spec(self):
+        """Returns a dict representation of the statistics part of the
+           specification, or None if there is none."""
+        if 'statistics' in self._module_spec:
+            return self._module_spec['statistics']
+        else:
+            return None
+    
     def __str__(self):
         """Returns a string representation of the full specification"""
         return self._module_spec.__str__()
@@ -160,8 +189,9 @@ def _check(module_spec):
     """Checks the full specification. This is a dict that contains the
        element "module_spec", which is in itself a dict that
        must contain at least a "module_name" (string) and optionally
-       a "config_data" and a "commands" element, both of which are lists
-       of dicts. Raises a ModuleSpecError if there is a problem."""
+       a "config_data", a "commands" and a "statistics" element, all
+       of which are lists of dicts. Raises a ModuleSpecError if there
+       is a problem."""
     if type(module_spec) != dict:
         raise ModuleSpecError("data specification not a dict")
     if "module_name" not in module_spec:
@@ -173,6 +203,8 @@ def _check(module_spec):
         _check_config_spec(module_spec["config_data"])
     if "commands" in module_spec:
         _check_command_spec(module_spec["commands"])
+    if "statistics" in module_spec:
+        _check_statistics_spec(module_spec["statistics"])
 
 def _check_config_spec(config_data):
     # config data is a list of items represented by dicts that contain
@@ -229,7 +261,7 @@ def _check_item_spec(config_item):
     item_type = config_item["item_type"]
     if type(item_type) != str:
         raise ModuleSpecError("item_type in " + item_name + " is not a string: " + str(type(item_type)))
-    if item_type not in ["integer", "real", "boolean", "string", "list", "map", "any"]:
+    if item_type not in ["integer", "real", "boolean", "string", "list", "map", "named_set", "any"]:
         raise ModuleSpecError("unknown item_type in " + item_name + ": " + item_type)
     if "item_optional" in config_item:
         if type(config_item["item_optional"]) != bool:
@@ -263,39 +295,96 @@ def _check_item_spec(config_item):
             if type(map_item) != dict:
                 raise ModuleSpecError("map_item_spec element is not a dict")
             _check_item_spec(map_item)
+    if 'item_format' in config_item and 'item_default' in config_item:
+        item_format = config_item["item_format"]
+        item_default = config_item["item_default"]
+        if not _check_format(item_default, item_format):
+            raise ModuleSpecError(
+                "Wrong format for " + str(item_default) + " in " + str(item_name))
 
+def _check_statistics_spec(statistics):
+    # statistics is a list of items represented by dicts that contain
+    # things like "item_name", depending on the type they can have
+    # specific subitems
+    """Checks a list that contains the statistics part of the
+       specification. Raises a ModuleSpecError if there is a
+       problem."""
+    if type(statistics) != list:
+        raise ModuleSpecError("statistics is of type " + str(type(statistics))
+                              + ", not a list of items")
+    for stat_item in statistics:
+        _check_item_spec(stat_item)
+        # Additionally checks if there are 'item_title' and
+        # 'item_description'
+        for item in [ 'item_title',  'item_description' ]:
+            if item not in stat_item:
+                raise ModuleSpecError("no " + item + " in statistics item")
+
+def _check_format(value, format_name):
+    """Check if specified value and format are correct. Return True if
+       it is correct."""
+    # TODO: should be added other format types if necessary
+    time_formats = { 'date-time' : "%Y-%m-%dT%H:%M:%SZ",
+                     'date'      : "%Y-%m-%d",
+                     'time'      : "%H:%M:%S" }
+    for fmt in time_formats:
+        if format_name == fmt:
+            try:
+                # reverse check
+                return value == time.strftime(
+                    time_formats[fmt],
+                    time.strptime(value, time_formats[fmt]))
+            except (ValueError, TypeError):
+                break
+    return False
 
 def _validate_type(spec, value, errors):
     """Returns true if the value is of the correct type given the
        specification"""
     data_type = spec['item_type']
     if data_type == "integer" and type(value) != int:
-        if errors != None:
+        if errors is not None:
             errors.append(str(value) + " should be an integer")
         return False
     elif data_type == "real" and type(value) != float:
-        if errors != None:
+        if errors is not None:
             errors.append(str(value) + " should be a real")
         return False
     elif data_type == "boolean" and type(value) != bool:
-        if errors != None:
+        if errors is not None:
             errors.append(str(value) + " should be a boolean")
         return False
     elif data_type == "string" and type(value) != str:
-        if errors != None:
+        if errors is not None:
             errors.append(str(value) + " should be a string")
         return False
     elif data_type == "list" and type(value) != list:
-        if errors != None:
+        if errors is not None:
             errors.append(str(value) + " should be a list")
         return False
     elif data_type == "map" and type(value) != dict:
+        if errors is not None:
+            errors.append(str(value) + " should be a map")
+        return False
+    elif data_type == "named_set" and type(value) != dict:
         if errors != None:
             errors.append(str(value) + " should be a map")
         return False
     else:
         return True
 
+def _validate_format(spec, value, errors):
+    """Returns true if the value is of the correct format given the
+       specification. And also return true if no 'item_format'"""
+    if "item_format" in spec:
+        item_format = spec['item_format']
+        if not _check_format(value, item_format):
+            if errors is not None:
+                errors.append("format type of " + str(value)
+                              + " should be " + item_format)
+            return False
+    return True
+
 def _validate_item(spec, full, data, errors):
     if not _validate_type(spec, data, errors):
         return False
@@ -304,12 +393,24 @@ def _validate_item(spec, full, data, errors):
         for data_el in data:
             if not _validate_type(list_spec, data_el, errors):
                 return False
+            if not _validate_format(list_spec, data_el, errors):
+                return False
             if list_spec['item_type'] == "map":
                 if not _validate_item(list_spec, full, data_el, errors):
                     return False
     elif type(data) == dict:
-        if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
-            return False
+        if 'map_item_spec' in spec:
+            if not _validate_spec_list(spec['map_item_spec'], full, data, errors):
+                return False
+        else:
+            named_set_spec = spec['named_set_item_spec']
+            for data_el in data.values():
+                if not _validate_type(named_set_spec, data_el, errors):
+                    return False
+                if not _validate_item(named_set_spec, full, data_el, errors):
+                    return False
+    elif not _validate_format(spec, data, errors):
+        return False
     return True
 
 def _validate_spec(spec, full, data, errors):
@@ -321,7 +422,7 @@ def _validate_spec(spec, full, data, errors):
     elif item_name in data:
         return _validate_item(spec, full, data[item_name], errors)
     elif full and not item_optional:
-        if errors != None:
+        if errors is not None:
             errors.append("non-optional item " + item_name + " missing")
         return False
     else:
@@ -346,7 +447,7 @@ def _validate_spec_list(module_spec, full, data, errors):
                 if spec_item["item_name"] == item_name:
                     found = True
             if not found and item_name != "version":
-                if errors != None:
+                if errors is not None:
                     errors.append("unknown item " + item_name)
                 validated = False
     return validated
diff --git a/src/lib/python/isc/config/tests/Makefile.am b/src/lib/python/isc/config/tests/Makefile.am
index 47ccc41..6670ee7 100644
--- a/src/lib/python/isc/config/tests/Makefile.am
+++ b/src/lib/python/isc/config/tests/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST += unittest_fakesession.py
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -21,7 +21,7 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/config \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/config \
 	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	CONFIG_TESTDATA_PATH=$(abs_top_srcdir)/src/lib/config/tests/testdata \
 	CONFIG_WR_TESTDATA_PATH=$(abs_top_builddir)/src/lib/config/tests/testdata \
diff --git a/src/lib/python/isc/config/tests/ccsession_test.py b/src/lib/python/isc/config/tests/ccsession_test.py
index ada0c8a..8d616e2 100644
--- a/src/lib/python/isc/config/tests/ccsession_test.py
+++ b/src/lib/python/isc/config/tests/ccsession_test.py
@@ -695,6 +695,12 @@ class TestUIModuleCCSession(unittest.TestCase):
         fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
         return UIModuleCCSession(fake_conn)
 
+    def create_uccs_named_set(self, fake_conn):
+        module_spec = isc.config.module_spec_from_file(self.spec_file("spec32.spec"))
+        fake_conn.set_get_answer('/module_spec', { module_spec.get_module_name(): module_spec.get_full_spec()})
+        fake_conn.set_get_answer('/config_data', { 'version': BIND10_CONFIG_DATA_VERSION })
+        return UIModuleCCSession(fake_conn)
+
     def test_init(self):
         fake_conn = fakeUIConn()
         fake_conn.set_get_answer('/module_spec', {})
@@ -715,12 +721,14 @@ class TestUIModuleCCSession(unittest.TestCase):
     def test_add_remove_value(self):
         fake_conn = fakeUIConn()
         uccs = self.create_uccs2(fake_conn)
+
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, 1, "a")
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "no_such_item", "a")
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.add_value, "Spec2/item1", "a")
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, 1, "a")
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "no_such_item", "a")
         self.assertRaises(isc.cc.data.DataNotFoundError, uccs.remove_value, "Spec2/item1", "a")
+
         self.assertEqual({}, uccs._local_changes)
         uccs.add_value("Spec2/item5", "foo")
         self.assertEqual({'Spec2': {'item5': ['a', 'b', 'foo']}}, uccs._local_changes)
@@ -730,10 +738,87 @@ class TestUIModuleCCSession(unittest.TestCase):
         uccs.remove_value("Spec2/item5", "foo")
         uccs.add_value("Spec2/item5", "foo")
         self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
-        uccs.add_value("Spec2/item5", "foo")
+        self.assertRaises(isc.cc.data.DataAlreadyPresentError,
+                          uccs.add_value, "Spec2/item5", "foo")
         self.assertEqual({'Spec2': {'item5': ['foo']}}, uccs._local_changes)
+        self.assertRaises(isc.cc.data.DataNotFoundError,
+                          uccs.remove_value, "Spec2/item5[123]", None)
         uccs.remove_value("Spec2/item5[0]", None)
         self.assertEqual({'Spec2': {'item5': []}}, uccs._local_changes)
+        uccs.add_value("Spec2/item5", None);
+        self.assertEqual({'Spec2': {'item5': ['']}}, uccs._local_changes)
+        # Intending to empty a list element, but forget specifying the index.
+        self.assertRaises(isc.cc.data.DataTypeError,
+                          uccs.remove_value, "Spec2/item5", None)
+
+    def test_add_remove_value_named_set(self):
+        fake_conn = fakeUIConn()
+        uccs = self.create_uccs_named_set(fake_conn)
+        value, status = uccs.get_value("/Spec32/named_set_item")
+        self.assertEqual({'a': 1, 'b': 2}, value)
+
+        # make sure that removing from default actually removes it
+        uccs.remove_value("/Spec32/named_set_item", "a")
+        value, status = uccs.get_value("/Spec32/named_set_item")
+        self.assertEqual({'b': 2}, value)
+        self.assertEqual(uccs.LOCAL, status)
+
+        # ok, put it back now
+        uccs.add_value("/Spec32/named_set_item", "a")
+        uccs.set_value("/Spec32/named_set_item/a", 1)
+
+        uccs.add_value("/Spec32/named_set_item", "foo")
+        value, status = uccs.get_value("/Spec32/named_set_item")
+        self.assertEqual({'a': 1, 'b': 2, 'foo': 3}, value)
+
+        uccs.remove_value("/Spec32/named_set_item", "a")
+        uccs.remove_value("/Spec32/named_set_item", "foo")
+        value, status = uccs.get_value("/Spec32/named_set_item")
+        self.assertEqual({'b': 2}, value)
+
+        uccs.set_value("/Spec32/named_set_item/c", 5)
+        value, status = uccs.get_value("/Spec32/named_set_item")
+        self.assertEqual({"b": 2, "c": 5}, value)
+
+        self.assertRaises(isc.cc.data.DataNotFoundError,
+                          uccs.set_value,
+                          "/Spec32/named_set_item/no_such_item/a",
+                          4)
+        self.assertRaises(isc.cc.data.DataNotFoundError,
+                          uccs.remove_value, "/Spec32/named_set_item",
+                          "no_such_item")
+
+    def test_set_value_named_set(self):
+        fake_conn = fakeUIConn()
+        uccs = self.create_uccs_named_set(fake_conn)
+        value, status = uccs.get_value("/Spec32/named_set_item2")
+        self.assertEqual({}, value)
+        self.assertEqual(status, uccs.DEFAULT)
+
+        # Try setting a value that is optional but has no default
+        uccs.add_value("/Spec32/named_set_item2", "new1")
+        uccs.set_value("/Spec32/named_set_item2/new1/first", 3)
+        # Different method to add a new element
+        uccs.set_value("/Spec32/named_set_item2/new2", { "second": 4 })
+
+        value, status = uccs.get_value("/Spec32/named_set_item2")
+        self.assertEqual({ "new1": {"first": 3 }, "new2": {"second": 4}},
+                         value)
+        self.assertEqual(status, uccs.LOCAL)
+
+        uccs.set_value("/Spec32/named_set_item2/new1/second", "foo")
+
+        value, status = uccs.get_value("/Spec32/named_set_item2")
+        self.assertEqual({ "new1": {"first": 3, "second": "foo" },
+                           "new2": {"second": 4}},
+                         value)
+        self.assertEqual(status, uccs.LOCAL)
+
+        # make sure using a bad name still fails
+        self.assertRaises(isc.cc.data.DataNotFoundError, uccs.set_value,
+                          "/Spec32/named_set_item2/doesnotexist/first", 3)
+
+
 
     def test_commit(self):
         fake_conn = fakeUIConn()
diff --git a/src/lib/python/isc/config/tests/cfgmgr_test.py b/src/lib/python/isc/config/tests/cfgmgr_test.py
index 0a9e2d3..589a398 100644
--- a/src/lib/python/isc/config/tests/cfgmgr_test.py
+++ b/src/lib/python/isc/config/tests/cfgmgr_test.py
@@ -37,7 +37,7 @@ class TestConfigManagerData(unittest.TestCase):
         It shouldn't append the data path to it.
         """
         abs_path = self.data_path + os.sep + "b10-config-imaginary.db"
-        data = ConfigManagerData(os.getcwd(), abs_path)
+        data = ConfigManagerData(self.data_path, abs_path)
         self.assertEqual(abs_path, data.db_filename)
         self.assertEqual(self.data_path, data.data_path)
 
@@ -88,7 +88,7 @@ class TestConfigManagerData(unittest.TestCase):
         self.assertEqual(cfd1, cfd2)
         cfd2.data['test'] = { 'a': [ 1, 2, 3]}
         self.assertNotEqual(cfd1, cfd2)
-        
+
 
 class TestConfigManager(unittest.TestCase):
 
@@ -128,7 +128,7 @@ class TestConfigManager(unittest.TestCase):
         msg = self.fake_session.get_message("Boss", None)
         self.assert_(msg)
         # this one is actually wrong, but 'current status quo'
-        self.assertEqual(msg, {"running": "configmanager"})
+        self.assertEqual(msg, {"running": "ConfigManager"})
 
     def test_set_module_spec(self):
         module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
@@ -198,8 +198,8 @@ class TestConfigManager(unittest.TestCase):
         self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
         config_spec = self.cm.get_config_spec('Spec2')
         self.assertEqual(config_spec['Spec2'], module_spec.get_config_spec())
-        
-    
+
+
     def test_get_commands_spec(self):
         commands_spec = self.cm.get_commands_spec()
         self.assertEqual(commands_spec, {})
@@ -219,6 +219,25 @@ class TestConfigManager(unittest.TestCase):
         commands_spec = self.cm.get_commands_spec('Spec2')
         self.assertEqual(commands_spec['Spec2'], module_spec.get_commands_spec())
 
+    def test_get_statistics_spec(self):
+        statistics_spec = self.cm.get_statistics_spec()
+        self.assertEqual(statistics_spec, {})
+        module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec1.spec")
+        self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+        self.cm.set_module_spec(module_spec)
+        self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+        statistics_spec = self.cm.get_statistics_spec()
+        self.assertEqual(statistics_spec, { 'Spec1': None })
+        self.cm.remove_module_spec('Spec1')
+        module_spec = isc.config.module_spec.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
+        self.assert_(module_spec.get_module_name() not in self.cm.module_specs)
+        self.cm.set_module_spec(module_spec)
+        self.assert_(module_spec.get_module_name() in self.cm.module_specs)
+        statistics_spec = self.cm.get_statistics_spec()
+        self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+        statistics_spec = self.cm.get_statistics_spec('Spec2')
+        self.assertEqual(statistics_spec['Spec2'], module_spec.get_statistics_spec())
+
     def test_read_config(self):
         self.assertEqual(self.cm.config.data, {'version': config_data.BIND10_CONFIG_DATA_VERSION})
         self.cm.read_config()
@@ -231,7 +250,7 @@ class TestConfigManager(unittest.TestCase):
     def test_write_config(self):
         # tested in ConfigManagerData tests
         pass
-    
+
     def _handle_msg_helper(self, msg, expected_answer):
         answer = self.cm.handle_msg(msg)
         self.assertEqual(expected_answer, answer)
@@ -241,6 +260,7 @@ class TestConfigManager(unittest.TestCase):
         self._handle_msg_helper("", { 'result': [ 1, 'Unknown message format: ']})
         self._handle_msg_helper({ "command": [ "badcommand" ] }, { 'result': [ 1, "Unknown command: badcommand"]})
         self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, {} ]})
+        self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, {} ]})
         self._handle_msg_helper({ "command": [ "get_module_spec" ] }, { 'result': [ 0, {} ]})
         self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "Spec2" } ] }, { 'result': [ 0, {} ]})
         #self._handle_msg_helper({ "command": [ "get_module_spec", { "module_name": "nosuchmodule" } ] },
@@ -318,7 +338,7 @@ class TestConfigManager(unittest.TestCase):
         #                 self.fake_session.get_message(self.name, None))
         #self.assertEqual({'version': 1, 'TestModule': {'test': 124}}, self.cm.config.data)
         #
-        self._handle_msg_helper({ "command": 
+        self._handle_msg_helper({ "command":
                                   ["module_spec", self.spec.get_full_spec()]
                                 },
                                 {'result': [0]})
@@ -329,6 +349,7 @@ class TestConfigManager(unittest.TestCase):
                                                { "module_name" : "Spec2" } ] },
                                 { 'result': [ 0, self.spec.get_full_spec() ] })
         self._handle_msg_helper({ "command": [ "get_commands_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_commands_spec() } ]})
+        self._handle_msg_helper({ "command": [ "get_statistics_spec" ] }, { 'result': [ 0, { self.spec.get_module_name(): self.spec.get_statistics_spec() } ]})
         # re-add this once we have new way to propagate spec changes (1 instead of the current 2 messages)
         #self.assertEqual(len(self.fake_session.message_queue), 2)
         # the name here is actually wrong (and hardcoded), but needed in the current version
@@ -338,7 +359,7 @@ class TestConfigManager(unittest.TestCase):
         #self.assertEqual({'commands_update': [ self.name, self.commands ] },
         #                 self.fake_session.get_message("Cmdctl", None))
 
-        self._handle_msg_helper({ "command": 
+        self._handle_msg_helper({ "command":
                                   ["shutdown"]
                                 },
                                 {'result': [0]})
@@ -450,6 +471,7 @@ class TestConfigManager(unittest.TestCase):
 
     def test_run(self):
         self.fake_session.group_sendmsg({ "command": [ "get_commands_spec" ] }, "ConfigManager")
+        self.fake_session.group_sendmsg({ "command": [ "get_statistics_spec" ] }, "ConfigManager")
         self.fake_session.group_sendmsg({ "command": [ "shutdown" ] }, "ConfigManager")
         self.cm.run()
         pass
diff --git a/src/lib/python/isc/config/tests/config_data_test.py b/src/lib/python/isc/config/tests/config_data_test.py
index fc1bffa..bede625 100644
--- a/src/lib/python/isc/config/tests/config_data_test.py
+++ b/src/lib/python/isc/config/tests/config_data_test.py
@@ -236,6 +236,7 @@ class TestConfigData(unittest.TestCase):
         value, default = self.cd.get_value("item6/value2")
         self.assertEqual(None, value)
         self.assertEqual(False, default)
+        self.assertRaises(isc.cc.data.DataNotFoundError, self.cd.get_value, "item6/no_such_item")
 
     def test_get_default_value(self):
         self.assertEqual(1, self.cd.get_default_value("item1"))
@@ -360,7 +361,7 @@ class TestMultiConfigData(unittest.TestCase):
 
     def test_get_current_config(self):
         cf = { 'module1': { 'item1': 2, 'item2': True } }
-        self.mcd._set_current_config(cf);
+        self.mcd._set_current_config(cf)
         self.assertEqual(cf, self.mcd.get_current_config())
 
     def test_get_local_changes(self):
@@ -421,6 +422,17 @@ class TestMultiConfigData(unittest.TestCase):
         value = self.mcd.get_default_value("Spec2/no_such_item/asdf")
         self.assertEqual(None, value)
 
+        module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+        self.mcd.set_specification(module_spec)
+        value = self.mcd.get_default_value("Spec32/named_set_item")
+        self.assertEqual({ 'a': 1, 'b': 2}, value)
+        value = self.mcd.get_default_value("Spec32/named_set_item/a")
+        self.assertEqual(1, value)
+        value = self.mcd.get_default_value("Spec32/named_set_item/b")
+        self.assertEqual(2, value)
+        value = self.mcd.get_default_value("Spec32/named_set_item/no_such_item")
+        self.assertEqual(None, value)
+
     def test_get_value(self):
         module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
         self.mcd.set_specification(module_spec)
@@ -544,6 +556,29 @@ class TestMultiConfigData(unittest.TestCase):
         maps = self.mcd.get_value_maps("/Spec22/value9")
         self.assertEqual(expected, maps)
 
+    def test_get_value_maps_named_set(self):
+        module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+        self.mcd.set_specification(module_spec)
+        maps = self.mcd.get_value_maps()
+        self.assertEqual([{'default': False, 'type': 'module',
+                           'name': 'Spec32', 'value': None,
+                           'modified': False}], maps)
+        maps = self.mcd.get_value_maps("/Spec32/named_set_item")
+        self.assertEqual([{'default': True, 'type': 'integer',
+                           'name': 'Spec32/named_set_item/a',
+                           'value': 1, 'modified': False},
+                          {'default': True, 'type': 'integer',
+                           'name': 'Spec32/named_set_item/b',
+                           'value': 2, 'modified': False}], maps)
+        maps = self.mcd.get_value_maps("/Spec32/named_set_item/a")
+        self.assertEqual([{'default': True, 'type': 'integer',
+                           'name': 'Spec32/named_set_item/a',
+                           'value': 1, 'modified': False}], maps)
+        maps = self.mcd.get_value_maps("/Spec32/named_set_item/b")
+        self.assertEqual([{'default': True, 'type': 'integer',
+                           'name': 'Spec32/named_set_item/b',
+                           'value': 2, 'modified': False}], maps)
+
     def test_set_value(self):
         module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec2.spec")
         self.mcd.set_specification(module_spec)
@@ -582,6 +617,24 @@ class TestMultiConfigData(unittest.TestCase):
         config_items = self.mcd.get_config_item_list("Spec2", True)
         self.assertEqual(['Spec2/item1', 'Spec2/item2', 'Spec2/item3', 'Spec2/item4', 'Spec2/item5', 'Spec2/item6/value1', 'Spec2/item6/value2'], config_items)
 
+    def test_get_config_item_list_named_set(self):
+        config_items = self.mcd.get_config_item_list()
+        self.assertEqual([], config_items)
+        module_spec = isc.config.module_spec_from_file(self.data_path + os.sep + "spec32.spec")
+        self.mcd.set_specification(module_spec)
+        config_items = self.mcd.get_config_item_list()
+        self.assertEqual(['Spec32'], config_items)
+        config_items = self.mcd.get_config_item_list(None, False)
+        self.assertEqual(['Spec32'], config_items)
+        config_items = self.mcd.get_config_item_list(None, True)
+        self.assertEqual(['Spec32/named_set_item', 'Spec32/named_set_item2'], config_items)
+        self.mcd.set_value('Spec32/named_set_item', { "aaaa": 4, "aabb": 5, "bbbb": 6})
+        config_items = self.mcd.get_config_item_list("/Spec32/named_set_item", True)
+        self.assertEqual(['Spec32/named_set_item/aaaa',
+                          'Spec32/named_set_item/aabb',
+                          'Spec32/named_set_item/bbbb',
+                         ], config_items)
+
 if __name__ == '__main__':
     unittest.main()
 
diff --git a/src/lib/python/isc/config/tests/module_spec_test.py b/src/lib/python/isc/config/tests/module_spec_test.py
index a4dcdec..fc53d23 100644
--- a/src/lib/python/isc/config/tests/module_spec_test.py
+++ b/src/lib/python/isc/config/tests/module_spec_test.py
@@ -81,6 +81,11 @@ class TestModuleSpec(unittest.TestCase):
         self.assertRaises(ModuleSpecError, self.read_spec_file, "spec20.spec")
         self.assertRaises(ModuleSpecError, self.read_spec_file, "spec21.spec")
         self.assertRaises(ModuleSpecError, self.read_spec_file, "spec26.spec")
+        self.assertRaises(ModuleSpecError, self.read_spec_file, "spec34.spec")
+        self.assertRaises(ModuleSpecError, self.read_spec_file, "spec35.spec")
+        self.assertRaises(ModuleSpecError, self.read_spec_file, "spec36.spec")
+        self.assertRaises(ModuleSpecError, self.read_spec_file, "spec37.spec")
+        self.assertRaises(ModuleSpecError, self.read_spec_file, "spec38.spec")
 
     def validate_data(self, specfile_name, datafile_name):
         dd = self.read_spec_file(specfile_name);
@@ -98,6 +103,9 @@ class TestModuleSpec(unittest.TestCase):
         self.assertEqual(True, self.validate_data("spec22.spec", "data22_6.data"))
         self.assertEqual(True, self.validate_data("spec22.spec", "data22_7.data"))
         self.assertEqual(False, self.validate_data("spec22.spec", "data22_8.data"))
+        self.assertEqual(True, self.validate_data("spec32.spec", "data32_1.data"))
+        self.assertEqual(False, self.validate_data("spec32.spec", "data32_2.data"))
+        self.assertEqual(False, self.validate_data("spec32.spec", "data32_3.data"))
 
     def validate_command_params(self, specfile_name, datafile_name, cmd_name):
         dd = self.read_spec_file(specfile_name);
@@ -120,6 +128,17 @@ class TestModuleSpec(unittest.TestCase):
         self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd1'))
         self.assertEqual(False, self.validate_command_params("spec27.spec", "data22_8.data", 'cmd2'))
 
+    def test_statistics_validation(self):
+        def _validate_stat(specfile_name, datafile_name):
+            dd = self.read_spec_file(specfile_name);
+            data_file = open(self.spec_file(datafile_name))
+            data_str = data_file.read()
+            data = isc.cc.data.parse_value_str(data_str)
+            return dd.validate_statistics(True, data, [])
+        self.assertFalse(self.read_spec_file("spec1.spec").validate_statistics(True, None, None));
+        self.assertTrue(_validate_stat("spec33.spec", "data33_1.data"))
+        self.assertFalse(_validate_stat("spec33.spec", "data33_2.data"))
+
     def test_init(self):
         self.assertRaises(ModuleSpecError, ModuleSpec, 1)
         module_spec = isc.config.module_spec_from_file(self.spec_file("spec1.spec"), False)
@@ -266,6 +285,80 @@ class TestModuleSpec(unittest.TestCase):
                           }
                          )
 
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_datetime",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': 1,
+                            'item_format': "date-time"
+                          }
+                         )
+
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_date",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': 1,
+                            'item_format': "date"
+                          }
+                         )
+
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_time",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': 1,
+                            'item_format': "time"
+                          }
+                         )
+
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_datetime",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': "2011-05-27T19:42:57Z",
+                            'item_format': "dummy-format"
+                          }
+                         )
+
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_date",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': "2011-05-27",
+                            'item_format': "dummy-format"
+                          }
+                         )
+
+        self.assertRaises(ModuleSpecError, isc.config.module_spec._check_item_spec,
+                          { 'item_name': "a_time",
+                            'item_type': "string",
+                            'item_optional': False,
+                            'item_default': "19:42:57Z",
+                            'item_format': "dummy-format"
+                          }
+                         )
+
+    def test_check_format(self):
+        self.assertTrue(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'date-time'))
+        self.assertTrue(isc.config.module_spec._check_format('2011-05-27', 'date'))
+        self.assertTrue(isc.config.module_spec._check_format('19:42:57', 'time'))
+        self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57Z', 'dummy'))
+        self.assertFalse(isc.config.module_spec._check_format('2011-05-27', 'dummy'))
+        self.assertFalse(isc.config.module_spec._check_format('19:42:57', 'dummy'))
+        self.assertFalse(isc.config.module_spec._check_format('2011-13-99T99:99:99Z', 'date-time'))
+        self.assertFalse(isc.config.module_spec._check_format('2011-13-99', 'date'))
+        self.assertFalse(isc.config.module_spec._check_format('99:99:99', 'time'))
+        self.assertFalse(isc.config.module_spec._check_format('', 'date-time'))
+        self.assertFalse(isc.config.module_spec._check_format(None, 'date-time'))
+        self.assertFalse(isc.config.module_spec._check_format(None, None))
+        # wrong date-time-type format not ending with "Z"
+        self.assertFalse(isc.config.module_spec._check_format('2011-05-27T19:42:57', 'date-time'))
+        # wrong date-type format ending with "T"
+        self.assertFalse(isc.config.module_spec._check_format('2011-05-27T', 'date'))
+        # wrong time-type format ending with "Z"
+        self.assertFalse(isc.config.module_spec._check_format('19:42:57Z', 'time'))
+
     def test_validate_type(self):
         errors = []
         self.assertEqual(True, isc.config.module_spec._validate_type({ 'item_type': 'integer' }, 1, errors))
@@ -303,6 +396,25 @@ class TestModuleSpec(unittest.TestCase):
         self.assertEqual(False, isc.config.module_spec._validate_type({ 'item_type': 'map' }, 1, errors))
         self.assertEqual(['1 should be a map'], errors)
 
+    def test_validate_format(self):
+        errors = []
+        self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "2011-05-27T19:42:57Z", errors))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", None))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date-time' }, "a", errors))
+        self.assertEqual(['format type of a should be date-time'], errors)
+
+        errors = []
+        self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "2011-05-27", errors))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", None))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'date' }, "a", errors))
+        self.assertEqual(['format type of a should be date'], errors)
+
+        errors = []
+        self.assertEqual(True, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "19:42:57", errors))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", None))
+        self.assertEqual(False, isc.config.module_spec._validate_format({ 'item_format': 'time' }, "a", errors))
+        self.assertEqual(['format type of a should be time'], errors)
+
     def test_validate_spec(self):
         spec = { 'item_name': "an_item",
                  'item_type': "string",
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index 46fb661..fb6d151 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -1,10 +1,40 @@
 SUBDIRS = . tests
 
+# old data, should be removed in the near future once conversion is done
+pythondir = $(pyexecdir)/isc/datasrc
 python_PYTHON = __init__.py master.py sqlite3_ds.py
 
-pythondir = $(pyexecdir)/isc/datasrc
+
+# new data
+
+AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CPPFLAGS += $(SQLITE_CFLAGS)
+
+python_LTLIBRARIES = datasrc.la
+datasrc_la_SOURCES = datasrc.cc datasrc.h
+datasrc_la_SOURCES += client_python.cc client_python.h
+datasrc_la_SOURCES += iterator_python.cc iterator_python.h
+datasrc_la_SOURCES += finder_python.cc finder_python.h
+datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
+
+datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
+datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
+datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
+datasrc_la_LDFLAGS += -module
+datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
+datasrc_la_LIBADD += $(PYTHON_LIB)
+
+EXTRA_DIST = client_inc.cc
+EXTRA_DIST += finder_inc.cc
+EXTRA_DIST += iterator_inc.cc
+EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
 
 CLEANDIRS = __pycache__
 
 clean-local:
 	rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/datasrc/__init__.py b/src/lib/python/isc/datasrc/__init__.py
index 0e1e481..7ebd918 100644
--- a/src/lib/python/isc/datasrc/__init__.py
+++ b/src/lib/python/isc/datasrc/__init__.py
@@ -1,2 +1,35 @@
-from isc.datasrc.master import *
+import sys
+import os
+
+# The datasource factory loader uses dlopen, as does python
+# for its modules. Some dynamic linkers do not play nice if 
+# modules are not loaded with RTLD_GLOBAL, a symptom of which
+# is that exceptions are not recognized by type. So to make
+# sure this doesn't happen, we temporarily set RTLD_GLOBAL
+# during the loading of the datasource wrappers.
+import ctypes
+flags = sys.getdlopenflags()
+sys.setdlopenflags(flags | ctypes.RTLD_GLOBAL)
+
+# this setup is a temporary workaround to deal with the problem of
+# having both 'normal' python modules and a wrapper module
+# Once all programs use the new interface, we should remove the
+# old, and the setup can be made similar to that of the log wrappers.
+intree = False
+for base in sys.path[:]:
+    datasrc_libdir = os.path.join(base, 'isc/datasrc/.libs')
+    if os.path.exists(datasrc_libdir):
+        sys.path.insert(0, datasrc_libdir)
+        intree = True
+
+if intree:
+    from datasrc import *
+else:
+    from isc.datasrc.datasrc import *
+
+# revert to the default dlopen flags
+sys.setdlopenflags(flags)
+
 from isc.datasrc.sqlite3_ds import *
+from isc.datasrc.master import *
+
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
new file mode 100644
index 0000000..e0c0f06
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -0,0 +1,249 @@
+namespace {
+
+const char* const DataSourceClient_doc = "\
+The base class of data source clients.\n\
+\n\
+This is the python wrapper for the abstract base class that defines\n\
+the common interface for various types of data source clients. A data\n\
+source client is a top level access point to a data source, allowing \n\
+various operations on the data source such as lookups, traversing or \n\
+updates.\n\
+This class serves as both the factory and the main interface to those \n\
+classes.\n\
+\n\
+The constructor takes two arguments; a type (string), and\n\
+configuration data for a datasource client of that type. The configuration\n\
+data is currently passed as a JSON in string form, and its contents depend\n\
+on the type of datasource from the first argument. For instance, a\n\
+datasource of type \"sqlite3\" takes the config \n\
+{ \"database_file\": \"/var/example.org\" }\n\
+We may in the future add support for passing configuration data,\n\
+but right now we limit it to a JSON-formatted string\n\
+\n\
+The client class itself has limited focus and delegates \n\
+the responsibility for these specific operations to other (c++) classes;\n\
+in general methods of this class act as factories of these other classes.\n\
+\n\
+- InMemoryClient: A client of a conceptual data source that stores all\n\
+  necessary data in memory for faster lookups\n\
+- DatabaseClient: A client that uses a real database backend (such as\n\
+  an SQL database). It would internally hold a connection to the\n\
+  underlying database system.\n\
+\n\
+It is intentional that while the term these derived classes don't\n\
+contain \"DataSource\" unlike their base class. It's also noteworthy\n\
+that the naming of the base class is somewhat redundant because the\n\
+namespace datasrc would indicate that it's related to a data source.\n\
+The redundant naming comes from the observation that namespaces are\n\
+often omitted with using directives, in which case \"Client\" would be\n\
+too generic. On the other hand, concrete derived classes are generally\n\
+not expected to be referenced directly from other modules and\n\
+applications, so we'll give them more concise names such as\n\
+InMemoryClient. A single DataSourceClient object is expected to handle\n\
+only a single RR class even if the underlying data source contains\n\
+records for multiple RR classes. Likewise, (when we support views) a\n\
+DataSourceClient object is expected to handle only a single view.\n\
+\n\
+If the application uses multiple threads, each thread will need to\n\
+create and use a separate DataSourceClient. This is because some\n\
+database backend doesn't allow multiple threads to share the same\n\
+connection to the database.\n\
+\n\
+For a client using an in memory backend, this may result in having a\n\
+multiple copies of the same data in memory, increasing the memory\n\
+footprint substantially. Depending on how to support multiple CPU\n\
+cores for concurrent lookups on the same single data source (which is\n\
+not fully fixed yet, and for which multiple threads may be used), this\n\
+design may have to be revisited. This class (and therefore its derived\n\
+classes) are not copyable. This is because the derived classes would\n\
+generally contain attributes that are not easy to copy (such as a\n\
+large size of in memory data or a network connection to a database\n\
+server). In order to avoid a surprising disruption with a naive copy\n\
+it's prohibited explicitly. For the expected usage of the client\n\
+classes the restriction should be acceptable.\n\
+\n\
+Todo: This class is still not complete. It will need more factory\n\
+methods, e.g. for (re)loading a zone.\n\
+";
+
+const char* const DataSourceClient_findZone_doc = "\
+find_zone(name) -> (code, ZoneFinder)\n\
+\n\
+Returns a ZoneFinder for a zone that best matches the given name.\n\
+\n\
+code: The result code of the operation (integer).\n\
+- DataSourceClient.SUCCESS: A zone that gives an exact match is found\n\
+- DataSourceClient.PARTIALMATCH: A zone whose origin is a super domain of name\n\
+  is found (but there is no exact match)\n\
+- DataSourceClient.NOTFOUND: For all other cases.\n\
+ZoneFinder: ZoneFinder object for the found zone if one is found;\n\
+otherwise None.\n\
+\n\
+Any internal error will be raised as an isc.datasrc.Error exception\n\
+\n\
+Parameters:\n\
+  name       A domain name for which the search is performed.\n\
+\n\
+Return Value(s): A tuple containing a result value and a ZoneFinder object or\n\
+None\n\
+";
+
+const char* const DataSourceClient_getIterator_doc = "\
+get_iterator(name, separate_rrs=False) -> ZoneIterator\n\
+\n\
+Returns an iterator to the given zone.\n\
+\n\
+This allows for traversing the whole zone. The returned object can\n\
+provide the RRsets one by one.\n\
+\n\
+This throws isc.datasrc.Error when the zone does not exist in the\n\
+datasource, or when an internal error occurs.\n\
+\n\
+The default implementation throws isc.datasrc.NotImplemented. This allows for\n\
+easy and fast deployment of minimal custom data sources, where the\n\
+user/implementator doesn't have to care about anything else but the\n\
+actual queries. Also, in some cases, it isn't possible to traverse the\n\
+zone from logic point of view (eg. dynamically generated zone data).\n\
+\n\
+It is not fixed if a concrete implementation of this method can throw\n\
+anything else.\n\
+\n\
+Parameters:\n\
+  isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
+               nearest match as find_zone.\n\
+  separate_rrs If true, the iterator will return each RR as a\n\
+               new RRset object. If false, the iterator will\n\
+               combine consecutive RRs with the name and type\n\
+               into 1 RRset. The capitalization of the RRset will\n\
+               be that of the first RR read, and TTLs will be\n\
+               adjusted to the lowest one found.\n\
+\n\
+Return Value(s): Pointer to the iterator.\n\
+";
+
+const char* const DataSourceClient_getUpdater_doc = "\
+get_updater(name, replace, journaling=False) -> ZoneUpdater\n\
+\n\
+Return an updater to make updates to a specific zone.\n\
+\n\
+The RR class of the zone is the one that the client is expected to\n\
+handle (see the detailed description of this class).\n\
+\n\
+If the specified zone is not found via the client, a None object will\n\
+be returned; in other words a completely new zone cannot be created\n\
+using an updater. It must be created beforehand (even if it's an empty\n\
+placeholder) in a way specific to the underlying data source.\n\
+\n\
+Conceptually, the updater will trigger a separate transaction for\n\
+subsequent updates to the zone within the context of the updater (the\n\
+actual implementation of the \"transaction\" may vary for the specific\n\
+underlying data source). Until commit() is performed on the updater,\n\
+the intermediate updates won't affect the results of other methods\n\
+(and the result of the object's methods created by other factory\n\
+methods). Likewise, if the updater is destructed without performing\n\
+commit(), the intermediate updates will be effectively canceled and\n\
+will never affect other methods.\n\
+\n\
+If the underlying data source allows concurrent updates, this method\n\
+can be called multiple times while the previously returned updater(s)\n\
+are still active. In this case each updater triggers a different\n\
+\"transaction\". Normally it would be for different zones for such a\n\
+case as handling multiple incoming AXFR streams concurrently, but this\n\
+interface does not even prohibit an attempt of getting more than one\n\
+updater for the same zone, as long as the underlying data source\n\
+allows such an operation (and any conflict resolution is left to the\n\
+specific implementation).\n\
+\n\
+If replace is true, any existing RRs of the zone will be deleted on\n\
+successful completion of updates (after commit() on the updater); if\n\
+it's false, the existing RRs will be intact unless explicitly deleted\n\
+by delete_rrset() on the updater.\n\
+\n\
+A data source can be \"read only\" or can prohibit partial updates. In\n\
+such cases this method will result in an isc.datasrc.NotImplemented exception\n\
+unconditionally or when replace is false).\n\
+\n\
+If journaling is True, the data source should store a journal of\n\
+changes. These can be used later on by, for example, IXFR-out.\n\
+However, the parameter is a hint only. It might be unable to store\n\
+them and they would be silently discarded. Or it might need to store\n\
+them no matter what (for example a git-based data source would store\n\
+journal implicitly). When the journaling is True, it requires that the\n\
+following update be formatted as IXFR transfer (SOA to be removed,\n\
+bunch of RRs to be removed, SOA to be added, bunch of RRs to be added,\n\
+and possibly repeated). However, it is not required that the updater\n\
+checks that. If it is False, it must not require so and must accept\n\
+any order of changes.\n\
+\n\
+We don't support erasing the whole zone (by replace being True) and\n\
+saving a journal at the same time. In such situation, isc.datasrc.Error\n\
+is thrown.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc. NotImplemented The underlying data source does not support\n\
+               updates.\n\
+  isc.datasrc.Error Internal error in the underlying data source.\n\
+\n\
+Parameters:\n\
+  name       The zone name to be updated\n\
+  replace    Whether to delete existing RRs before making updates\n\
+  journaling The zone updater should store a journal of the changes.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+//   pointer -> (removed)
+//   Null -> None
+//   exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+   (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify (if exist) the corresponding difference between the specified\n\
+versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if specified range of difference for the\n\
+zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method for that type of data source can throw an exception of class\n\
+isc.datasrc.NotImplemented.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.NotImplemented The data source does not support differences.\n\
+  isc.datasrc.Error Other operational errors at the data source level.\n\
+  SystemError An unexpected error in the backend C++ code.  Either a rare\n\
+              system error such as short memory or an implementation bug.\n\
+\n\
+Parameters:\n\
+  zone       The name of the zone for which the difference should be\n\
+             retrieved.\n\
+  begin_serial The SOA serial of the beginning version of the\n\
+             differences.\n\
+  end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n                                                  \
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
new file mode 100644
index 0000000..bdf84a3
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -0,0 +1,347 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/factory.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "journal_reader_python.h"
+#include "client_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_DataSourceClient : public PyObject {
+public:
+    s_DataSourceClient() : cppobj(NULL) {};
+    DataSourceClientContainer* cppobj;
+};
+
+PyObject*
+DataSourceClient_findZone(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject *name;
+    if (PyArg_ParseTuple(args, "O!", &name_type, &name)) {
+        try {
+            DataSourceClient::FindResult find_result(
+                self->cppobj->getInstance().findZone(PyName_ToName(name)));
+
+            result::Result r = find_result.code;
+            ZoneFinderPtr zfp = find_result.zone_finder;
+            // Use N instead of O so refcount isn't increased twice
+            return (Py_BuildValue("IN", r, createZoneFinderObject(zfp, po_self)));
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(getDataSourceException("Error"),
+                            "Unexpected exception");
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+}
+
+PyObject*
+DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject* name_obj;
+    PyObject* separate_rrs_obj = NULL;
+    if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
+                         &separate_rrs_obj)) {
+        try {
+            bool separate_rrs = false;
+            if (separate_rrs_obj != NULL) {
+            // store result in local var so we can explicitly check for
+                // -1 error return value
+                int separate_rrs_true = PyObject_IsTrue(separate_rrs_obj);
+                if (separate_rrs_true == 1) {
+                    separate_rrs = true;
+                } else if (separate_rrs_true == -1) {
+                    PyErr_SetString(getDataSourceException("Error"),
+                                    "Error getting value of separate_rrs");
+                    return (NULL);
+                }
+            }
+            return (createZoneIteratorObject(
+                self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
+                                                        separate_rrs),
+                po_self));
+        } catch (const isc::NotImplemented& ne) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            ne.what());
+            return (NULL);
+        } catch (const DataSourceError& dse) {
+            PyErr_SetString(getDataSourceException("Error"), dse.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(getDataSourceException("Error"),
+                            "Unexpected exception");
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+}
+
+PyObject*
+DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject *name_obj;
+    PyObject *replace_obj = NULL;
+    PyObject *journaling_obj = Py_False;
+    if (PyArg_ParseTuple(args, "O!O|O", &name_type, &name_obj,
+                         &replace_obj, &journaling_obj) &&
+        PyBool_Check(replace_obj) && PyBool_Check(journaling_obj)) {
+        const bool replace = (replace_obj != Py_False);
+        const bool journaling = (journaling_obj == Py_True);
+        try {
+            ZoneUpdaterPtr updater =
+                self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
+                                                       replace, journaling);
+            if (!updater) {
+                return (Py_None);
+            }
+            return (createZoneUpdaterObject(updater, po_self));
+        } catch (const isc::NotImplemented& ne) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            ne.what());
+            return (NULL);
+        } catch (const DataSourceError& dse) {
+            PyErr_SetString(getDataSourceException("Error"), dse.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(getDataSourceException("Error"),
+                            "Unexpected exception");
+            return (NULL);
+        }
+    } else {
+        // PyBool_Check doesn't set the error, so we have to set it ourselves.
+        if (replace_obj != NULL && !PyBool_Check(replace_obj)) {
+            PyErr_SetString(PyExc_TypeError, "'replace' for "
+                            "DataSourceClient.get_updater must be boolean");
+        }
+        if (!PyBool_Check(journaling_obj)) {
+            PyErr_SetString(PyExc_TypeError, "'journaling' for "
+                            "DataSourceClient.get_updater must be boolean");
+        }
+        return (NULL);
+    }
+}
+
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject *name_obj;
+    unsigned long begin_obj, end_obj;
+
+    if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+                         &begin_obj, &end_obj)) {
+        try {
+            pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+                self->cppobj->getInstance().getJournalReader(
+                    PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+                    static_cast<uint32_t>(end_obj));
+            PyObject* po_reader;
+            if (result.first == ZoneJournalReader::SUCCESS) {
+                po_reader = createZoneJournalReaderObject(result.second,
+                                                          po_self);
+            } else {
+                po_reader = Py_None;
+                Py_INCREF(po_reader); // this will soon be released
+            }
+            PyObjectContainer container(po_reader);
+            return (Py_BuildValue("(iO)", result.first, container.get()));
+        } catch (const isc::NotImplemented& ex) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            ex.what());
+        } catch (const DataSourceError& ex) {
+            PyErr_SetString(getDataSourceException("Error"), ex.what());
+        } catch (const std::exception& ex) {
+            PyErr_SetString(PyExc_SystemError, ex.what());
+        } catch (...) {
+            PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+        }
+    }
+    return (NULL);
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef DataSourceClient_methods[] = {
+    { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+      DataSourceClient_findZone_doc },
+    { "get_iterator",
+      DataSourceClient_getIterator, METH_VARARGS,
+      DataSourceClient_getIterator_doc },
+    { "get_updater", DataSourceClient_getUpdater,
+      METH_VARARGS, DataSourceClient_getUpdater_doc },
+    { "get_journal_reader", DataSourceClient_getJournalReader,
+      METH_VARARGS, DataSourceClient_getJournalReader_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+int
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+    s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
+    char* ds_type_str;
+    char* ds_config_str;
+    try {
+        // Turn the given argument into config Element; then simply call
+        // factory class to do its magic
+
+        // for now, ds_config must be JSON string
+        if (PyArg_ParseTuple(args, "ss", &ds_type_str, &ds_config_str)) {
+            isc::data::ConstElementPtr ds_config =
+                isc::data::Element::fromJSON(ds_config_str);
+            self->cppobj = new DataSourceClientContainer(ds_type_str,
+                                                         ds_config);
+            return (0);
+        } else {
+            return (-1);
+        }
+    } catch (const isc::data::JSONError& je) {
+        const string ex_what = "JSON parse error in data source configuration "
+                               "data for type " +
+                               string(ds_type_str) + ":" + je.what();
+        PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+        return (-1);
+    } catch (const DataSourceError& dse) {
+        const string ex_what = "Failed to create DataSourceClient of type " +
+                               string(ds_type_str) + ":" + dse.what();
+        PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+        return (-1);
+    } catch (const exception& ex) {
+        const string ex_what = "Failed to construct DataSourceClient object: " +
+            string(ex.what());
+        PyErr_SetString(getDataSourceException("Error"), ex_what.c_str());
+        return (-1);
+    } catch (...) {
+        PyErr_SetString(PyExc_RuntimeError,
+            "Unexpected exception in constructing DataSourceClient");
+        return (-1);
+    }
+    PyErr_SetString(PyExc_TypeError,
+                    "Invalid arguments to DataSourceClient constructor");
+
+    return (-1);
+}
+
+void
+DataSourceClient_destroy(PyObject* po_self) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    delete self->cppobj;
+    self->cppobj = NULL;
+    Py_TYPE(self)->tp_free(self);
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_DataSourceClient
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject datasourceclient_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.DataSourceClient",
+    sizeof(s_DataSourceClient),         // tp_basicsize
+    0,                                  // tp_itemsize
+    DataSourceClient_destroy,           // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    DataSourceClient_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    DataSourceClient_methods,           // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    DataSourceClient_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/client_python.h b/src/lib/python/isc/datasrc/client_python.h
new file mode 100644
index 0000000..b20fb6b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/client_python.h
@@ -0,0 +1,35 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_CLIENT_H
+#define __PYTHON_DATASRC_CLIENT_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject datasourceclient_type;
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_CLIENT_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
new file mode 100644
index 0000000..1573b81
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -0,0 +1,297 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include <structmember.h>
+
+#include <config.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+
+#include "datasrc.h"
+#include "client_python.h"
+#include "finder_python.h"
+#include "iterator_python.h"
+#include "updater_python.h"
+#include "journal_reader_python.h"
+
+#include <util/python/pycppwrapper_util.h>
+#include <dns/python/pydnspp_common.h>
+
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyObject*
+getDataSourceException(const char* ex_name) {
+    PyObject* ex_obj = NULL;
+
+    PyObject* datasrc_module = PyImport_AddModule("isc.datasrc");
+    if (datasrc_module != NULL) {
+        PyObject* datasrc_dict = PyModule_GetDict(datasrc_module);
+        if (datasrc_dict != NULL) {
+            ex_obj = PyDict_GetItemString(datasrc_dict, ex_name);
+        }
+    }
+
+    if (ex_obj == NULL) {
+        ex_obj = PyExc_RuntimeError;
+    }
+    return (ex_obj);
+}
+
+} // end namespace python
+} // end namespace datasrc
+} // end namespace isc
+
+namespace {
+
+bool
+initModulePart_DataSourceClient(PyObject* mod) {
+    // We initialize the static description object with PyType_Ready(),
+    // then add it to the module. This is not just a check! (leaving
+    // this out results in segmentation faults)
+    if (PyType_Ready(&datasourceclient_type) < 0) {
+        return (false);
+    }
+    void* dscp = &datasourceclient_type;
+    if (PyModule_AddObject(mod, "DataSourceClient", static_cast<PyObject*>(dscp)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&datasourceclient_type);
+
+    try {
+        installClassVariable(datasourceclient_type, "SUCCESS",
+                             Py_BuildValue("I", result::SUCCESS));
+        installClassVariable(datasourceclient_type, "EXIST",
+                             Py_BuildValue("I", result::EXIST));
+        installClassVariable(datasourceclient_type, "NOTFOUND",
+                             Py_BuildValue("I", result::NOTFOUND));
+        installClassVariable(datasourceclient_type, "PARTIALMATCH",
+                             Py_BuildValue("I", result::PARTIALMATCH));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in DataSourceClient initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+            "Unexpected failure in DataSourceClient initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+bool
+initModulePart_ZoneFinder(PyObject* mod) {
+    // We initialize the static description object with PyType_Ready(),
+    // then add it to the module. This is not just a check! (leaving
+    // this out results in segmentation faults)
+    if (PyType_Ready(&zonefinder_type) < 0) {
+        return (false);
+    }
+    void* zip = &zonefinder_type;
+    if (PyModule_AddObject(mod, "ZoneFinder", static_cast<PyObject*>(zip)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&zonefinder_type);
+
+    try {
+        installClassVariable(zonefinder_type, "SUCCESS",
+                             Py_BuildValue("I", ZoneFinder::SUCCESS));
+        installClassVariable(zonefinder_type, "DELEGATION",
+                             Py_BuildValue("I", ZoneFinder::DELEGATION));
+        installClassVariable(zonefinder_type, "NXDOMAIN",
+                             Py_BuildValue("I", ZoneFinder::NXDOMAIN));
+        installClassVariable(zonefinder_type, "NXRRSET",
+                             Py_BuildValue("I", ZoneFinder::NXRRSET));
+        installClassVariable(zonefinder_type, "CNAME",
+                             Py_BuildValue("I", ZoneFinder::CNAME));
+        installClassVariable(zonefinder_type, "DNAME",
+                             Py_BuildValue("I", ZoneFinder::DNAME));
+        installClassVariable(zonefinder_type, "WILDCARD",
+                             Py_BuildValue("I", ZoneFinder::WILDCARD));
+        installClassVariable(zonefinder_type, "WILDCARD_NXRRSET",
+                             Py_BuildValue("I", ZoneFinder::WILDCARD_NXRRSET));
+        installClassVariable(zonefinder_type, "WILDCARD_CNAME",
+                             Py_BuildValue("I", ZoneFinder::WILDCARD_CNAME));
+
+        installClassVariable(zonefinder_type, "FIND_DEFAULT",
+                             Py_BuildValue("I", ZoneFinder::FIND_DEFAULT));
+        installClassVariable(zonefinder_type, "FIND_GLUE_OK",
+                             Py_BuildValue("I", ZoneFinder::FIND_GLUE_OK));
+        installClassVariable(zonefinder_type, "FIND_DNSSEC",
+                             Py_BuildValue("I", ZoneFinder::FIND_DNSSEC));
+        installClassVariable(zonefinder_type, "NO_WILDCARD",
+                             Py_BuildValue("I", ZoneFinder::NO_WILDCARD));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in ZoneFinder initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in ZoneFinder initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+bool
+initModulePart_ZoneIterator(PyObject* mod) {
+    // We initialize the static description object with PyType_Ready(),
+    // then add it to the module. This is not just a check! (leaving
+    // this out results in segmentation faults)
+    if (PyType_Ready(&zoneiterator_type) < 0) {
+        return (false);
+    }
+    void* zip = &zoneiterator_type;
+    if (PyModule_AddObject(mod, "ZoneIterator", static_cast<PyObject*>(zip)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&zoneiterator_type);
+
+    return (true);
+}
+
+bool
+initModulePart_ZoneUpdater(PyObject* mod) {
+    // We initialize the static description object with PyType_Ready(),
+    // then add it to the module. This is not just a check! (leaving
+    // this out results in segmentation faults)
+    if (PyType_Ready(&zoneupdater_type) < 0) {
+        return (false);
+    }
+    void* zip = &zoneupdater_type;
+    if (PyModule_AddObject(mod, "ZoneUpdater", static_cast<PyObject*>(zip)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&zoneupdater_type);
+
+    return (true);
+}
+
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+    if (PyType_Ready(&journal_reader_type) < 0) {
+        return (false);
+    }
+    void* p = &journal_reader_type;
+    if (PyModule_AddObject(mod, "ZoneJournalReader",
+                           static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&journal_reader_type);
+
+    try {
+        installClassVariable(journal_reader_type, "SUCCESS",
+                             Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+        installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_ZONE));
+        installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_VERSION));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in ZoneJournalReader initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+            "Unexpected failure in ZoneJournalReader initialization");
+        return (false);
+    }
+
+    return (true);
+}
+
+PyObject* po_DataSourceError;
+PyObject* po_NotImplemented;
+
+PyModuleDef iscDataSrc = {
+    { PyObject_HEAD_INIT(NULL) NULL, 0, NULL},
+    "datasrc",
+    "Python bindings for the classes in the isc::datasrc namespace.\n\n"
+    "These bindings are close match to the C++ API, but they are not complete "
+    "(some parts are not needed) and some are done in more python-like ways.",
+    -1,
+    NULL,
+    NULL,
+    NULL,
+    NULL,
+    NULL
+};
+
+} // end anonymous namespace
+
+PyMODINIT_FUNC
+PyInit_datasrc(void) {
+    PyObject* mod = PyModule_Create(&iscDataSrc);
+    if (mod == NULL) {
+        return (NULL);
+    }
+
+    if (!initModulePart_DataSourceClient(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    if (!initModulePart_ZoneFinder(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    if (!initModulePart_ZoneIterator(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    if (!initModulePart_ZoneUpdater(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    if (!initModulePart_ZoneJournalReader(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    try {
+        po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
+                                                NULL);
+        PyObjectContainer(po_DataSourceError).installToModule(mod, "Error");
+        po_NotImplemented = PyErr_NewException("isc.datasrc.NotImplemented",
+                                               NULL, NULL);
+        PyObjectContainer(po_NotImplemented).installToModule(mod,
+                                                             "NotImplemented");
+    } catch (...) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
+    return (mod);
+}
diff --git a/src/lib/python/isc/datasrc/datasrc.h b/src/lib/python/isc/datasrc/datasrc.h
new file mode 100644
index 0000000..d82881b
--- /dev/null
+++ b/src/lib/python/isc/datasrc/datasrc.h
@@ -0,0 +1,50 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_H
+#define __PYTHON_DATASRC_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+// Return a Python exception object of the given name (ex_name) defined in
+// the isc.datasrc.datasrc loadable module.
+//
+// Since the datasrc module is a different binary image and is loaded separately
+// from the dns module, it would be very tricky to directly access to
+// C/C++ symbols defined in that module.  So we get access to these object
+// using the Python interpreter through this wrapper function.
+//
+// The __init__.py file should ensure isc.datasrc has been loaded by the time
+// this function is called, and there shouldn't be any operation
+// within this function that can fail (such as dynamic memory allocation),
+// so this function should always succeed.  Yet there may be an overlooked
+// failure mode, perhaps due to a bug in the binding implementation, or
+// due to invalid usage.  As a last resort for such cases, this function
+// returns PyExc_RuntimeError (a C binding of Python's RuntimeError) should
+// it encounter an unexpected failure.
+extern PyObject* getDataSourceException(const char* ex_name);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
+#endif // __PYTHON_DATASRC_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
new file mode 100644
index 0000000..82c5fdc
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -0,0 +1,134 @@
+namespace {
+const char* const ZoneFinder_doc = "\
+The base class to search a zone for RRsets.\n\
+\n\
+The ZoneFinder class is a wrapper for the c++ base class for representing an\n\
+object that performs DNS lookups in a specific zone accessible via a\n\
+data source. In general, different types of data sources (in-memory,\n\
+database-based, etc) define their own derived c++ classes of ZoneFinder,\n\
+implementing ways to retrieve the required data through the common\n\
+interfaces declared in the base class. Each concrete ZoneFinder object\n\
+is therefore (conceptually) associated with a specific zone of one\n\
+specific data source instance.\n\
+\n\
+The origin name and the RR class of the associated zone are available\n\
+via the get_origin() and get_class() methods, respectively.\n\
+\n\
+The most important method of this class is find(), which performs the\n\
+lookup for a given domain and type. See the description of the method\n\
+for details.\n\
+\n\
+It's not clear whether we should request that a zone finder form a\n\
+\"transaction\", that is, whether to ensure the finder is not\n\
+susceptible to changes made by someone else than the creator of the\n\
+finder. If we don't request that, for example, two different lookup\n\
+results for the same name and type can be different if other threads\n\
+or programs make updates to the zone between the lookups. We should\n\
+revisit this point as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneFinder_getOrigin_doc = "\
+get_origin() -> isc.dns.Name\n\
+\n\
+Return the origin name of the zone.\n\
+\n\
+";
+
+const char* const ZoneFinder_getClass_doc = "\
+get_class() -> isc.dns.RRClass\n\
+\n\
+Return the RR class of the zone.\n\
+\n\
+";
+
+// Main changes from the C++ doxygen version:
+// - Return type: use tuple instead of the dedicated FindResult type
+// - NULL->None
+// - exceptions
+// - description of the 'target' parameter (must be None for now)
+const char* const ZoneFinder_find_doc = "\
+find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
+\n\
+Search the zone for a given pair of domain name and RR type.\n\
+\n\
+Each derived version of this method searches the underlying backend\n\
+for the data that best matches the given name and type. This method is\n\
+expected to be \"intelligent\", and identifies the best possible\n\
+answer for the search key. Specifically,\n\
+\n\
+- If the search name belongs under a zone cut, it returns the code of\n\
+  DELEGATION and the NS RRset at the zone cut.\n\
+- If there is no matching name, it returns the code of NXDOMAIN, and,\n\
+  if DNSSEC is requested, the NSEC RRset that proves the non-\n\
+  existence.\n\
+- If there is a matching name but no RRset of the search type, it\n\
+  returns the code of NXRRSET, and, if DNSSEC is required, the NSEC\n\
+  RRset for that name.\n\
+- If there is a CNAME RR of the searched name but there is no RR of\n\
+  the searched type of the name (so this type is different from\n\
+  CNAME), it returns the code of CNAME and that CNAME RR. Note that if\n\
+  the searched RR type is CNAME, it is considered a successful match,\n\
+  and the code of SUCCESS will be returned.\n\
+- If the search name matches a delegation point of DNAME, it returns\n\
+  the code of DNAME and that DNAME RR.\n\
+- If the target isn't None, all RRsets under the domain are inserted\n\
+  there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
+  instead of normal processing. This is intended to handle ANY query.\n\
+  (Note: the Python version doesn't support this feature yet)\n\
+\n\
+Note: This behavior is controversial as we discussed in\n\
+https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
+should revisit the interface before we heavily rely on it.\n\
+\n\
+The options parameter specifies customized behavior of the search.\n\
+Their semantics are as follows (they are a bit-field and can be OR'ed):\n\
+\n\
+- FIND_GLUE_OK Allow search under a zone cut. By default the search\n\
+  will stop once it encounters a zone cut. If this option is specified\n\
+  it remembers information about the highest zone cut and continues\n\
+  the search until it finds an exact match for the given name or it\n\
+  detects there is no exact match. If an exact match is found, RRsets\n\
+  for that name are searched just like the normal case; otherwise, if\n\
+  the search has encountered a zone cut, DELEGATION with the\n\
+  information of the highest zone cut will be returned.\n\
+- FIND_DNSSEC Request that DNSSEC data (like NSEC, RRSIGs) are\n\
+  returned with the answer. It is allowed for the data source to\n\
+  include them even when not requested.\n\
+- NO_WILDCARD Do not try wildcard matching. This option is of no use\n\
+  for normal lookups; it's intended to be used to get a DNSSEC proof\n\
+  of the non existence of any matching wildcard or non existence of an\n\
+  exact match when a wildcard match is found.\n\
+\n\
+\n\
+This method raises an isc.datasrc.Error exception if there is an\n\
+internal error in the datasource.\n\
+\n\
+Parameters:\n\
+  name       The domain name to be searched for.\n\
+  type       The RR type to be searched for.\n\
+  target     Must be None.\n\
+  options    The search options.\n\
+\n\
+Return Value(s): A tuple of a result code (integer) and an RRset object\n\
+enclosing the search result (see above).\n\
+";
+
+const char* const ZoneFinder_find_previous_name_doc = "\
+find_previous_name(isc.dns.Name) -> isc.dns.Name\n\
+\n\
+Gets the previous name in the DNSSEC order. This can be used\n\
+to find the correct NSEC records for proving nonexistence\n\
+of domains.\n\
+\n\
+This method does not include under-zone-cut data (glue data).\n\
+\n\
+Raises isc.datasrc.NotImplemented in case the data source backend\n\
+doesn't support DNSSEC or there is no previous name in the zone\n\
+(NSEC records might be missing in the DB, the queried name is less\n\
+than or equal to the apex).\n\
+\n\
+Raises isc.datasrc.Error for low-level or internal datasource errors\n\
+(like broken connection to database, wrong data living there).\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
new file mode 100644
index 0000000..7f74133
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -0,0 +1,289 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+#include <dns/python/pydnspp_common.h>
+
+#include "datasrc.h"
+#include "finder_python.h"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// This is the shared code for the find() call in the finder and the updater
+// It is intentionally not available through any header, nor at our standard
+// namespace, as it is not supposed to be called anywhere but from finder and
+// updater
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
+    if (finder == NULL) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Internal error in find() wrapper; "
+                        "finder object NULL");
+        return (NULL);
+    }
+    PyObject* name;
+    PyObject* rrtype;
+    PyObject* target = Py_None;
+    unsigned int options_int = ZoneFinder::FIND_DEFAULT;
+    if (PyArg_ParseTuple(args, "O!O!|OI", &name_type, &name,
+                                         &rrtype_type, &rrtype,
+                                         &target, &options_int)) {
+        try {
+            if (target != Py_None) {
+                PyErr_SetString(PyExc_TypeError,
+                                "find(): target must be None in this version");
+                return (NULL);
+            }
+            ZoneFinder::FindOptions options =
+                static_cast<ZoneFinder::FindOptions>(options_int);
+            const ZoneFinder::FindResult find_result(
+                finder->find(PyName_ToName(name), PyRRType_ToRRType(rrtype),
+                             NULL, options));
+            const ZoneFinder::Result r = find_result.code;
+            isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
+            if (rrsp) {
+                // Use N instead of O so the refcount isn't increased twice
+                return (Py_BuildValue("IN", r, createRRsetObject(*rrsp)));
+            } else {
+                return (Py_BuildValue("IO", r, Py_None));
+            }
+        } catch (const DataSourceError& dse) {
+            PyErr_SetString(getDataSourceException("Error"), dse.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(getDataSourceException("Error"),
+                            "Unexpected exception");
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+    return Py_BuildValue("I", 1);
+}
+
+} // end namespace isc_datasrc_internal
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneFinder : public PyObject {
+public:
+    s_ZoneFinder() : cppobj(ZoneFinderPtr()), base_obj(NULL) {};
+    ZoneFinderPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we use INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneFinder, ZoneFinder> ZoneFinderContainer;
+
+// General creation and destruction
+int
+ZoneFinder_init(s_ZoneFinder* self, PyObject* args) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneFinder cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneFinder_destroy(s_ZoneFinder* const self) {
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneFinder_getClass(PyObject* po_self, PyObject*) {
+    s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+    try {
+        return (createRRClassObject(self->cppobj->getClass()));
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneFinder_getOrigin(PyObject* po_self, PyObject*) {
+    s_ZoneFinder* self = static_cast<s_ZoneFinder*>(po_self);
+    try {
+        return (createNameObject(self->cppobj->getOrigin()));
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneFinder_find(PyObject* po_self, PyObject* args) {
+    s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+    return (isc_datasrc_internal::ZoneFinder_helper(self->cppobj.get(), args));
+}
+
+PyObject*
+ZoneFinder_findPreviousName(PyObject* po_self, PyObject* args) {
+    s_ZoneFinder* const self = static_cast<s_ZoneFinder*>(po_self);
+    PyObject* name_obj;
+    if (PyArg_ParseTuple(args, "O!", &name_type, &name_obj)) {
+        try {
+            return (createNameObject(
+                self->cppobj->findPreviousName(PyName_ToName(name_obj))));
+        } catch (const isc::NotImplemented& nie) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            nie.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        } catch (...) {
+            PyErr_SetString(getDataSourceException("Error"),
+                            "Unexpected exception");
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneFinder_methods[] = {
+    { "get_origin", ZoneFinder_getOrigin, METH_NOARGS,
+       ZoneFinder_getOrigin_doc },
+    { "get_class", ZoneFinder_getClass, METH_NOARGS, ZoneFinder_getClass_doc },
+    { "find", ZoneFinder_find, METH_VARARGS, ZoneFinder_find_doc },
+    { "find_previous_name", ZoneFinder_findPreviousName, METH_VARARGS,
+      ZoneFinder_find_previous_name_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+PyTypeObject zonefinder_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneFinder",
+    sizeof(s_ZoneFinder),               // tp_basicsize
+    0,                                  // tp_itemsize
+    reinterpret_cast<destructor>(ZoneFinder_destroy),// tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneFinder_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    ZoneFinder_methods,                 // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    reinterpret_cast<initproc>(ZoneFinder_init),// tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneFinderObject(isc::datasrc::ZoneFinderPtr source, PyObject* base_obj) {
+    s_ZoneFinder* py_zf = static_cast<s_ZoneFinder*>(
+        zonefinder_type.tp_alloc(&zonefinder_type, 0));
+    if (py_zf != NULL) {
+        py_zf->cppobj = source;
+        py_zf->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (py_zf);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/finder_python.h b/src/lib/python/isc/datasrc/finder_python.h
new file mode 100644
index 0000000..23bc457
--- /dev/null
+++ b/src/lib/python/isc/datasrc/finder_python.h
@@ -0,0 +1,44 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_FINDER_H
+#define __PYTHON_DATASRC_FINDER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+
+namespace python {
+
+extern PyTypeObject zonefinder_type;
+
+/// \brief Create a ZoneFinder python object
+///
+/// \param source The zone iterator pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneFinder depends on
+///                 Its refcount is increased, and will be decreased when
+///                 this zone iterator is destroyed, making sure that the
+///                 base object is never destroyed before this zonefinder.
+PyObject* createZoneFinderObject(isc::datasrc::ZoneFinderPtr source,
+                                 PyObject* base_obj = NULL);
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_FINDER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/iterator_inc.cc b/src/lib/python/isc/datasrc/iterator_inc.cc
new file mode 100644
index 0000000..087200a
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_inc.cc
@@ -0,0 +1,67 @@
+namespace {
+
+const char* const ZoneIterator_doc = "\
+Read-only iterator to a zone.\n\
+\n\
+You can get an instance of the ZoneIterator from\n\
+DataSourceClient.get_iterator() method. The actual concrete\n\
+c++ implementation will be different depending on the actual data source\n\
+used. This is the abstract interface.\n\
+\n\
+There's no way to start iterating from the beginning again or return.\n\
+\n\
+The ZoneIterator is a python iterator, and can be iterated over directly.\n\
+";
+
+const char* const ZoneIterator_getNextRRset_doc = "\
+get_next_rrset() -> isc.dns.RRset\n\
+\n\
+Get next RRset from the zone.\n\
+\n\
+This returns the next RRset in the zone.\n\
+\n\
+Any special order is not guaranteed.\n\
+\n\
+While this can potentially throw anything (including standard\n\
+allocation errors), it should be rare.\n\
+\n\
+Pointer to the next RRset or None pointer when the iteration gets to\n\
+the end of the zone.\n\
+\n\
+Raises an isc.datasrc.Error exception if it is called again after returning\n\
+None\n\
+";
+
+// Modifications:
+//  - ConstRRset->RRset
+//  - NULL->None
+//  - removed notes about derived classes (which doesn't apply for python)
+const char* const ZoneIterator_getSOA_doc = "\
+get_soa() -> isc.dns.RRset\n\
+\n\
+Return the SOA record of the zone in the iterator context.\n\
+\n\
+This method returns the zone's SOA record (if any, and a valid zone\n\
+should have it) in the form of an RRset object. This SOA is identical\n\
+to that (again, if any) contained in the sequence of RRsets returned\n\
+by the iterator. In that sense this method is redundant, but is\n\
+provided as a convenient utility for the application of the iterator;\n\
+the application may need to know the SOA serial or the SOA RR itself\n\
+for the purpose of protocol handling or skipping the expensive\n\
+iteration processing.\n\
+\n\
+If the zone doesn't have an SOA (which is broken, but some data source\n\
+may allow that situation), this method returns None. Also, in the\n\
+normal and valid case, the SOA should have exactly one RDATA, but this\n\
+API does not guarantee it as some data source may accept such an\n\
+abnormal condition. It's up to the caller whether to check the number\n\
+of RDATA and how to react to the unexpected case.\n\
+\n\
+Exceptions:\n\
+  None\n\
+\n\
+Return Value(s): An SOA RRset object that would be\n\
+returned from the iteration. It will be None if the zone doesn't have\n\
+an SOA.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/iterator_python.cc b/src/lib/python/isc/datasrc/iterator_python.cc
new file mode 100644
index 0000000..9e6900c
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.cc
@@ -0,0 +1,242 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/iterator.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "iterator_python.h"
+
+#include "iterator_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneIterator : public PyObject {
+public:
+    s_ZoneIterator() : cppobj(ZoneIteratorPtr()), base_obj(NULL) {};
+    ZoneIteratorPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we use INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneIterator, ZoneIterator>
+    ZoneIteratorContainer;
+
+// General creation and destruction
+int
+ZoneIterator_init(s_ZoneIterator* self, PyObject* args) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneIterator cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneIterator_destroy(s_ZoneIterator* const self) {
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneIterator_getNextRRset(PyObject* po_self, PyObject*) {
+    s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+    if (!self->cppobj) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "get_next_rrset() called past end of iterator");
+        return (NULL);
+    }
+    try {
+        isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextRRset();
+        if (!rrset) {
+            Py_RETURN_NONE;
+        }
+        return (createRRsetObject(*rrset));
+    } catch (const isc::Exception& isce) {
+        // isc::Unexpected is thrown when we call getNextRRset() when we are
+        // already done iterating ('iterating past end')
+        // We could also simply return None again
+        PyErr_SetString(getDataSourceException("Error"), isce.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneIterator_iter(PyObject *self) {
+    Py_INCREF(self);
+    return (self);
+}
+
+PyObject*
+ZoneIterator_next(PyObject* self) {
+    PyObject *result = ZoneIterator_getNextRRset(self, NULL);
+    // iter_next must return NULL without error instead of Py_None
+    if (result == Py_None) {
+        Py_DECREF(result);
+        return (NULL);
+    } else {
+        return (result);
+    }
+}
+
+PyObject*
+ZoneIterator_getSOA(PyObject* po_self, PyObject*) {
+    s_ZoneIterator* self = static_cast<s_ZoneIterator*>(po_self);
+    try {
+        isc::dns::ConstRRsetPtr rrset = self->cppobj->getSOA();
+        if (!rrset) {
+            Py_RETURN_NONE;
+        }
+        return (createRRsetObject(*rrset));
+    } catch (const isc::Exception& isce) {
+        // isc::Unexpected is thrown when we call getNextRRset() when we are
+        // already done iterating ('iterating past end')
+        // We could also simply return None again
+        PyErr_SetString(getDataSourceException("Error"), isce.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyMethodDef ZoneIterator_methods[] = {
+    { "get_next_rrset", ZoneIterator_getNextRRset, METH_NOARGS,
+      ZoneIterator_getNextRRset_doc },
+    { "get_soa", ZoneIterator_getSOA, METH_NOARGS, ZoneIterator_getSOA_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneiterator_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneIterator",
+    sizeof(s_ZoneIterator),             // tp_basicsize
+    0,                                  // tp_itemsize
+    reinterpret_cast<destructor>(ZoneIterator_destroy),// tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneIterator_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    ZoneIterator_iter,                  // tp_iter
+    ZoneIterator_next,                  // tp_iternext
+    ZoneIterator_methods,               // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    reinterpret_cast<initproc>(ZoneIterator_init),// tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+                         PyObject* base_obj)
+{
+    s_ZoneIterator* py_zi = static_cast<s_ZoneIterator*>(
+        zoneiterator_type.tp_alloc(&zoneiterator_type, 0));
+    if (py_zi != NULL) {
+        py_zi->cppobj = source;
+        py_zi->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (py_zi);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/iterator_python.h b/src/lib/python/isc/datasrc/iterator_python.h
new file mode 100644
index 0000000..7c1b0eb
--- /dev/null
+++ b/src/lib/python/isc/datasrc/iterator_python.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_ITERATOR_H
+#define __PYTHON_DATASRC_ITERATOR_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+extern PyTypeObject zoneiterator_type;
+
+/// \brief Create a ZoneIterator python object
+///
+/// \param source The zone iterator pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneIterator depends on
+///                 Its refcount is increased, and will be decreased when
+///                 this zone iterator is destroyed, making sure that the
+///                 base object is never destroyed before this zone iterator.
+PyObject* createZoneIteratorObject(isc::datasrc::ZoneIteratorPtr source,
+                                   PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_ITERATOR_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific version of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the entire sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+//   ConstRRsetPtr -> RRset
+//   Null -> None
+//   InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in straightforward realization of this concept. The mapping\n\
+between the conceptual difference and the actual implementation is\n\
+hidden in each derived class).\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the difference along with the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) to the ending SOA (which is to\n\
+be added), and each call to this method returns one RR in the\n\
+form of an RRset that contains exactly one RDATA in the order of the\n\
+sequences.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in the form of IXFR-style. For\n\
+example, the first RR is supposed to be an SOA, and it should normally\n\
+be the case, but this interface does not necessarily require that the\n\
+derived class implementation ensure this. Normally the differences are\n\
+expected to be stored using this API (via a ZoneUpdater object), and\n\
+as long as that is the case and the underlying implementation follows\n\
+the requirement of the API, the result of this method should be a\n\
+valid IXFR-style sequences. So this API does not mandate the almost\n\
+redundant check as part of the interface. If the application needs to\n\
+be 100% sure, it must check the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+  ValueError The method is called beyond the end of the\n\
+             difference sequences.\n\
+  isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+             created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+    s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+    ZoneJournalReaderPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneJournalReader cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+    s_ZoneJournalReader* const self =
+        static_cast<s_ZoneJournalReader*>(po_self) ;
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+    s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+    try {
+        isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+        if (!rrset) {
+            Py_RETURN_NONE;
+        }
+        return (createRRsetObject(*rrset));
+    } catch (const isc::InvalidOperation& ex) {
+        PyErr_SetString(PyExc_ValueError, ex.what());
+        return (NULL);
+    } catch (const isc::Exception& isce) {
+        PyErr_SetString(getDataSourceException("Error"), isce.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+    Py_INCREF(self);
+    return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+    PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+    // iter_next must return NULL without error instead of Py_None
+    if (result == Py_None) {
+        Py_DECREF(result);
+        return (NULL);
+    } else {
+        return (result);
+    }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+    { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+      ZoneJournalReader_getNextDiff_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneJournalReader",
+    sizeof(s_ZoneJournalReader),             // tp_basicsize
+    0,                                  // tp_itemsize
+    ZoneJournalReader_destroy,          // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneJournalReader_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    ZoneJournalReader_iter,                  // tp_iter
+    ZoneJournalReader_next,                  // tp_iternext
+    ZoneJournalReader_methods,               // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    ZoneJournalReader_init,             // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+                              PyObject* base_obj)
+{
+    s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+        journal_reader_type.tp_alloc(&journal_reader_type, 0));
+    if (po != NULL) {
+        po->cppobj = source;
+        po->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on
+///                 Its refcount is increased, and will be decreased when
+///                 this reader is destroyed, making sure that the
+///                 base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+    isc::datasrc::ZoneJournalReaderPtr source,
+    PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index a77645a..daa12fc 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -33,44 +33,71 @@ def create(cur):
     Arguments:
         cur - sqlite3 cursor.
     """
-    cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
-    cur.execute("INSERT INTO schema_version VALUES (1)")
-    cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
-                   name STRING NOT NULL COLLATE NOCASE,
-                   rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
-                   dnssec BOOLEAN NOT NULL DEFAULT 0)""")
-    cur.execute("CREATE INDEX zones_byname ON zones (name)")
-    cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
-                   zone_id INTEGER NOT NULL,
-                   name STRING NOT NULL COLLATE NOCASE,
-                   rname STRING NOT NULL COLLATE NOCASE,
-                   ttl INTEGER NOT NULL,
-                   rdtype STRING NOT NULL COLLATE NOCASE,
-                   sigtype STRING COLLATE NOCASE,
-                   rdata STRING NOT NULL)""")
-    cur.execute("CREATE INDEX records_byname ON records (name)")
-    cur.execute("CREATE INDEX records_byrname ON records (rname)")
-    cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
-                   zone_id INTEGER NOT NULL,
-                   hash STRING NOT NULL COLLATE NOCASE,
-                   owner STRING NOT NULL COLLATE NOCASE,
-                   ttl INTEGER NOT NULL,
-                   rdtype STRING NOT NULL COLLATE NOCASE,
-                   rdata STRING NOT NULL)""")
-    cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
-
-def open(dbfile):
+    # We are creating the database because it apparently had not been at
+    # the time we tried to read from it. However, another process may have
+    # had the same idea, resulting in a potential race condition.
+    # Therefore, we obtain an exclusive lock before we create anything
+    # When we have it, we check *again* whether the database has been
+    # initialized. If not, we do so.
+
+    # If the database is perpetually locked, it'll time out automatically
+    # and we just let it fail.
+    cur.execute("BEGIN EXCLUSIVE TRANSACTION")
+    try:
+        cur.execute("SELECT version FROM schema_version")
+        row = cur.fetchone()
+    except sqlite3.OperationalError:
+        cur.execute("CREATE TABLE schema_version (version INTEGER NOT NULL)")
+        cur.execute("INSERT INTO schema_version VALUES (1)")
+        cur.execute("""CREATE TABLE zones (id INTEGER PRIMARY KEY,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rdclass STRING NOT NULL COLLATE NOCASE DEFAULT 'IN',
+                    dnssec BOOLEAN NOT NULL DEFAULT 0)""")
+        cur.execute("CREATE INDEX zones_byname ON zones (name)")
+        cur.execute("""CREATE TABLE records (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rname STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdtype STRING NOT NULL COLLATE NOCASE,
+                    sigtype STRING COLLATE NOCASE,
+                    rdata STRING NOT NULL)""")
+        cur.execute("CREATE INDEX records_byname ON records (name)")
+        cur.execute("CREATE INDEX records_byrname ON records (rname)")
+        cur.execute("""CREATE TABLE nsec3 (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    hash STRING NOT NULL COLLATE NOCASE,
+                    owner STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdtype STRING NOT NULL COLLATE NOCASE,
+                    rdata STRING NOT NULL)""")
+        cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+        cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    version INTEGER NOT NULL,
+                    operation INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rrtype STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdata STRING NOT NULL)""")
+        row = [1]
+    cur.execute("COMMIT TRANSACTION")
+    return row
+
+def open(dbfile, connect_timeout=5.0):
     """ Open a database, if the database is not yet set up, call create
     to do so. It may raise Sqlite3DSError if failed to open sqlite3
     database file or find bad database schema version in the database.
 
     Arguments:
         dbfile - the filename for the sqlite3 database.
+        connect_timeout - timeout for opening the database or acquiring locks
+                          defaults to sqlite3 module's default of 5.0 seconds
 
     Return sqlite3 connection, sqlite3 cursor.
     """
     try:
-        conn = sqlite3.connect(dbfile)
+        conn = sqlite3.connect(dbfile, timeout=connect_timeout)
         cur = conn.cursor()
     except Exception as e:
         fail = "Failed to open " + dbfile + ": " + e.args[0]
@@ -80,10 +107,13 @@ def open(dbfile):
     try:
         cur.execute("SELECT version FROM schema_version")
         row = cur.fetchone()
-    except:
-        create(cur)
-        conn.commit()
-        row = [1]
+    except sqlite3.OperationalError:
+        # temporarily disable automatic transactions so
+        # we can do our own
+        iso_lvl = conn.isolation_level
+        conn.isolation_level = None
+        row = create(cur)
+        conn.isolation_level = iso_lvl
 
     if row == None or row[0] != 1:
         raise Sqlite3DSError("Bad database schema version")
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 6f6d157..ab89b93 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -1,16 +1,24 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = master_test.py sqlite3_ds_test.py
+# old tests, TODO remove or change to use new API?
+#PYTESTS = master_test.py sqlite3_ds_test.py
+PYTESTS =  datasrc_test.py
 EXTRA_DIST = $(PYTESTS)
 
 EXTRA_DIST += testdata/brokendb.sqlite3
 EXTRA_DIST += testdata/example.com.sqlite3
-CLEANFILES = $(abs_builddir)/example.com.out.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
+CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
-LIBRARY_PATH_PLACEHOLDER =
+# We always add one, the location of the data source modules
+# We may want to add an API method for this to the ds factory, but that is out
+# of scope for this ticket
+LIBRARY_PATH_PLACEHOLDER = $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -23,8 +31,9 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+	PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	TESTDATA_WRITE_PATH=$(abs_builddir) \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
new file mode 100644
index 0000000..3e4a1d7
--- /dev/null
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -0,0 +1,854 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import isc.datasrc
+from isc.datasrc import ZoneFinder, ZoneJournalReader
+from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
+import unittest
+import sqlite3
+import os
+import shutil
+import sys
+import json
+
+TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
+TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
+
+READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "rwtest.sqlite3.copied"
+
+READ_ZONE_DB_CONFIG = "{ \"database_file\": \"" + READ_ZONE_DB_FILE + "\" }"
+WRITE_ZONE_DB_CONFIG = "{ \"database_file\": \"" + WRITE_ZONE_DB_FILE + "\"}"
+
+def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
+    rrset_to_add = isc.dns.RRset(name, rrclass, rrtype, ttl)
+    if rdatas is not None:
+        for rdata in rdatas:
+            rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
+    rrset_list.append(rrset_to_add)
+
+# returns true if rrset is in expected_rrsets
+# will remove the rrset from expected_rrsets if found
+def check_for_rrset(expected_rrsets, rrset):
+    for cur_rrset in expected_rrsets[:]:
+        if rrsets_equal(cur_rrset, rrset):
+            expected_rrsets.remove(cur_rrset)
+            return True
+    return False
+
+def create_soa(serial):
+    soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+    soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                        'ns1.example.org. admin.example.org. ' +
+                        str(serial) + ' 3600 1800 2419200 7200'))
+    return soa
+
+class DataSrcClient(unittest.TestCase):
+
+    def test_(self):
+        # can't construct directly
+        self.assertRaises(TypeError, isc.datasrc.ZoneIterator)
+
+        self.assertRaises(TypeError, isc.datasrc.DataSourceClient, 1, "{}")
+        self.assertRaises(TypeError, isc.datasrc.DataSourceClient, "sqlite3", 1)
+        self.assertRaises(isc.datasrc.Error,
+                          isc.datasrc.DataSourceClient, "foo", "{}")
+        self.assertRaises(isc.datasrc.Error,
+                          isc.datasrc.DataSourceClient, "sqlite3", "")
+        self.assertRaises(isc.datasrc.Error,
+                          isc.datasrc.DataSourceClient, "sqlite3", "{}")
+        self.assertRaises(isc.datasrc.Error,
+                          isc.datasrc.DataSourceClient, "sqlite3",
+                          "{ \"foo\": 1 }")
+        self.assertRaises(isc.datasrc.Error,
+                          isc.datasrc.DataSourceClient, "memory",
+                          "{ \"foo\": 1 }")
+
+    def test_iterate(self):
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+        # for RRSIGS, the TTL's are currently modified. This test should
+        # start failing when we fix that.
+        rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), True)
+
+        # we do not know the order in which they are returned by the iterator
+        # but we do want to check them, so we put all records into one list
+        # sort it (doesn't matter which way it is sorted, as long as it is
+        # sorted)
+
+        # RRset is (atm) an unorderable type, and within an rrset, the
+        # rdatas and rrsigs may also be in random order. In theory the
+        # rrsets themselves can be returned in any order.
+        #
+        # So we create a second list with all rrsets we expect, and for each
+        # rrset we get from the iterator, see if it is in that list, and
+        # remove it.
+        #
+        # When the iterator is empty, we check no rrsets are left in the
+        # list of expected ones
+        expected_rrset_list = []
+
+        name = isc.dns.Name("sql1.example.com")
+        rrclass = isc.dns.RRClass.IN()
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+                  [
+                     "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
+                     "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
+                     "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
+                     "5fs0dE/xLztL/CzZ"
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+                  [
+                     "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
+                     "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
+                     "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
+                     "taX3lRRPLYWpxRcDPEjysXT3Lh0vfL5D+CIO1yKw/q7C+v6+/kYAxc2l"+
+                     "fbNE3HpklSuF+dyX4nXxWgzbcFuLz5Bwfq6ZJ9RYe/kNkA0uMWNa1KkG"+
+                     "eRh8gg22kgD/KT5hPTnpezUWLvoY5Qc7IB3T0y4n2JIwiF2ZrZYVrWgD"+
+                     "jRWAzGsxJiJyjd6w2k0="
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+                  [
+                    "dns01.example.com."
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+                  [
+                    "dns02.example.com."
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+                  [
+                    "dns03.example.com."
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+                  [
+                     "www.sql1.example.com. NS SOA RRSIG NSEC DNSKEY"
+                  ])
+        # For RRSIGS, we can't add the fake data through the API, so we
+        # simply pass no rdata at all (which is skipped by the check later)
+        
+        # Since we passed separate_rrs = True to get_iterator, we get several
+        # sets of RRSIGs, one for each TTL
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
+                  [
+                     "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
+                  ])
+        name = isc.dns.Name("www.sql1.example.com.")
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.A(), isc.dns.RRTTL(3600),
+                  [
+                     "192.0.2.100"
+                  ])
+        name = isc.dns.Name("www.sql1.example.com.")
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NSEC(), isc.dns.RRTTL(7200),
+                  [
+                     "sql1.example.com. A RRSIG NSEC"
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+
+        # rrs is an iterator, but also has direct get_next_rrset(), use
+        # the latter one here
+        rrset_to_check = rrs.get_next_rrset()
+        while (rrset_to_check != None):
+            self.assertTrue(check_for_rrset(expected_rrset_list,
+                                            rrset_to_check),
+                            "Unexpected rrset returned by iterator:\n" +
+                            rrset_to_check.to_text())
+            rrset_to_check = rrs.get_next_rrset()
+
+        # Now check there are none left
+        self.assertEqual(0, len(expected_rrset_list),
+                         "RRset(s) not returned by iterator: " +
+                         str([rrset.get_name().to_text() + '/' +
+                              rrset.get_type().to_text() for rrset in
+                              expected_rrset_list ]
+                        ))
+
+        # TODO should we catch this (iterating past end) and just return None
+        # instead of failing?
+        self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+        # Without the separate_rrs argument, it should return 55 RRsets
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+        rrets = dsc.get_iterator(isc.dns.Name("example.com"))
+        # there are more than 80 RRs in this zone... let's just count them
+        # (already did a full check of the smaller zone above)
+        self.assertEqual(55, len(list(rrets)))
+
+        # same test, but now with explicit False argument for separate_rrs
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+        rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+        # there are more than 80 RRs in this zone... let's just count them
+        # (already did a full check of the smaller zone above)
+        self.assertEqual(55, len(list(rrets)))
+
+        # Count should be 84 if we request individual rrsets for differing ttls
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+        rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+        # there are more than 80 RRs in this zone... let's just count them
+        # (already did a full check of the smaller zone above)
+        self.assertEqual(84, len(list(rrets)))
+        # TODO should we catch this (iterating past end) and just return None
+        # instead of failing?
+        self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
+
+        self.assertRaises(TypeError, dsc.get_iterator, "asdf")
+
+    def test_iterator_soa(self):
+        # get_soa() on a zone iterator should return the zone's SOA RRset.
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+        iterator = dsc.get_iterator(isc.dns.Name("sql1.example.com."))
+        expected_soa = isc.dns.RRset(isc.dns.Name("sql1.example.com."),
+                                     isc.dns.RRClass.IN(),
+                                     isc.dns.RRType.SOA(),
+                                     isc.dns.RRTTL(3600))
+        expected_soa.add_rdata(isc.dns.Rdata(isc.dns.RRType.SOA(),
+                                             isc.dns.RRClass.IN(),
+                                             "master.example.com. " +
+                                             "admin.example.com. 678 " +
+                                             "3600 1800 2419200 7200"))
+        self.assertTrue(rrsets_equal(expected_soa, iterator.get_soa()))
+
+    def test_construct(self):
+        # can't construct directly; a ZoneFinder is only obtained through
+        # DataSourceClient.find_zone()
+        self.assertRaises(TypeError, isc.datasrc.ZoneFinder)
+
+    def test_findoptions(self):
+        '''A simple test to confirm no option is specified by default.
+
+        '''
+        # The find options are bit flags (tested with &); FIND_DEFAULT must
+        # not have any of the individual option bits set.
+        self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.FIND_GLUE_OK)
+        self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.FIND_DNSSEC)
+        self.assertFalse(ZoneFinder.FIND_DEFAULT & ZoneFinder.NO_WILDCARD)
+
+    def test_findresults(self):
+        '''A simple test to confirm result codes are (defined and) different
+        for some combinations.
+
+        '''
+        # Pairwise inequality of adjacent codes; not exhaustive, but catches
+        # accidentally duplicated constant values.
+        self.assertNotEqual(ZoneFinder.SUCCESS, ZoneFinder.DELEGATION)
+        self.assertNotEqual(ZoneFinder.DELEGATION, ZoneFinder.NXDOMAIN)
+        self.assertNotEqual(ZoneFinder.NXDOMAIN, ZoneFinder.NXRRSET)
+        self.assertNotEqual(ZoneFinder.NXRRSET, ZoneFinder.CNAME)
+        self.assertNotEqual(ZoneFinder.CNAME, ZoneFinder.DNAME)
+        self.assertNotEqual(ZoneFinder.DNAME, ZoneFinder.WILDCARD)
+        self.assertNotEqual(ZoneFinder.WILDCARD, ZoneFinder.WILDCARD_CNAME)
+        self.assertNotEqual(ZoneFinder.WILDCARD_CNAME,
+                            ZoneFinder.WILDCARD_NXRRSET)
+
+    def test_find(self):
+        # Exercise ZoneFinder.find() for the major result codes (SUCCESS,
+        # DELEGATION, NXDOMAIN, NXRRSET, CNAME, WILDCARD, WILDCARD_NXRRSET)
+        # and for argument type checking.
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+        result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+        self.assertEqual("example.com.", finder.get_origin().to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Check the optional parameters are optional
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A())
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(), None)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for the "target"
+        self.assertRaises(TypeError, finder.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), True)
+
+        # Name under a delegated subzone: DELEGATION, with the NS RRset
+        result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.DELEGATION, result)
+        self.assertEqual("sql1.example.com. 3600 IN NS dns01.example.com.\n" +
+                         "sql1.example.com. 3600 IN NS dns02.example.com.\n" +
+                         "sql1.example.com. 3600 IN NS dns03.example.com.\n",
+                         rrset.to_text())
+
+        # Name that doesn't exist in the zone: NXDOMAIN, no rrset returned
+        result, rrset = finder.find(isc.dns.Name("doesnotexist.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXDOMAIN, result)
+        self.assertEqual(None, rrset)
+
+        # A name outside the zone is also NXDOMAIN for this finder
+        result, rrset = finder.find(isc.dns.Name("www.some.other.domain"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXDOMAIN, result)
+        self.assertEqual(None, rrset)
+
+        # Existing name, but no RRset of the requested type: NXRRSET
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.TXT(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXRRSET, result)
+        self.assertEqual(None, rrset)
+
+        # A CNAME at the queried name is reported with the CNAME RRset
+        result, rrset = finder.find(isc.dns.Name("cname-ext.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.CNAME, result)
+        self.assertEqual(
+            "cname-ext.example.com. 3600 IN CNAME www.sql1.example.com.\n",
+            rrset.to_text())
+
+        # Wildcard match on both name and type
+        result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.WILDCARD, result)
+        self.assertEqual("foo.wild.example.com. 3600 IN A 192.0.2.255\n",
+                         rrset.to_text())
+
+        # Wildcard matches the name but not the requested type
+        result, rrset = finder.find(isc.dns.Name("foo.wild.example.com"),
+                                    isc.dns.RRType.TXT(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.WILDCARD_NXRRSET, result)
+        self.assertEqual(None, rrset)
+
+        # Bad argument types for name, type and options must raise TypeError
+        self.assertRaises(TypeError, finder.find,
+                          "foo",
+                          isc.dns.RRType.A(),
+                          None,
+                          finder.FIND_DEFAULT)
+        self.assertRaises(TypeError, finder.find,
+                          isc.dns.Name("cname-ext.example.com"),
+                          "foo",
+                          None,
+                          finder.FIND_DEFAULT)
+        self.assertRaises(TypeError, finder.find,
+                          isc.dns.Name("cname-ext.example.com"),
+                          isc.dns.RRType.A(),
+                          None,
+                          "foo")
+
+    def test_find_previous(self):
+        # find_previous_name() returns the name immediately preceding the
+        # given name in the zone's sort order.
+        dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+        result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.SUCCESS, result)
+
+        prev = finder.find_previous_name(isc.dns.Name("bbb.example.com"))
+        self.assertEqual("example.com.", prev.to_text())
+
+        prev = finder.find_previous_name(isc.dns.Name("zzz.example.com"))
+        self.assertEqual("www.example.com.", prev.to_text())
+
+        prev = finder.find_previous_name(prev)
+        self.assertEqual("*.wild.example.com.", prev.to_text())
+
+        # Asking for a name above the zone origin is not supported
+        self.assertRaises(isc.datasrc.NotImplemented,
+                          finder.find_previous_name,
+                          isc.dns.Name("com"))
+
+class DataSrcUpdater(unittest.TestCase):
+    # Tests for zone updates through DataSourceClient.get_updater() /
+    # ZoneUpdater: the updater's finder view, delete/add of RRsets,
+    # commit semantics, and rollback on destruction.
+
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+
+    def test_construct(self):
+        # can't construct directly; updaters come from get_updater()
+        self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
+
+    def test_update_finder(self):
+        # Check basic behavior of updater's finder
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(),
+                                     None,
+                                     ZoneFinder.FIND_DEFAULT)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Omit optional parameters
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A())
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(), None)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for 'target'
+        self.assertRaises(TypeError, updater.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), 1)
+
+    def test_update_delete_commit(self):
+
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+
+        # first make sure, through a separate finder, that some record exists
+        result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+        self.assertEqual("example.com.", finder.get_origin().to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        rrset_to_delete = rrset;
+
+        # can't delete rrset with associated sig. Abuse that to force an
+        # exception first, then remove the sig, then delete the record
+        updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+        self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+                          rrset_to_delete)
+
+        rrset_to_delete.remove_rrsig()
+
+        updater.delete_rrset(rrset_to_delete)
+
+        # The record should be gone in the updater, but not in the original
+        # finder (since we have not committed)
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(),
+                                     None,
+                                     finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXDOMAIN, result)
+        self.assertEqual(None, rrset)
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        updater.commit()
+        # second commit should raise exception
+        self.assertRaises(isc.datasrc.Error, updater.commit)
+
+        # the record should be gone now in the 'real' finder as well
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXDOMAIN, result)
+        self.assertEqual(None, rrset)
+
+        # now add it again
+        updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+        updater.add_rrset(rrset_to_delete)
+        updater.commit()
+
+        # second commit should throw
+        self.assertRaises(isc.datasrc.Error, updater.commit)
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+    def test_two_modules(self):
+        # load two modules, and check if they don't interfere
+        mem_cfg = { "type": "memory", "class": "IN", "zones": [] };
+        dsc_mem = isc.datasrc.DataSourceClient("memory", json.dumps(mem_cfg))
+        dsc_sql = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
+
+        # check if exceptions are working
+        self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+                          "memory", "{}")
+        self.assertRaises(isc.datasrc.Error, isc.datasrc.DataSourceClient,
+                          "sqlite3", "{}")
+
+        # see if a lookup succeeds in sqlite3 ds
+        result, finder = dsc_sql.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+        self.assertEqual("example.com.", finder.get_origin().to_text())
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # see if a lookup fails in mem ds
+        result, finder = dsc_mem.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.NXDOMAIN, result)
+
+
+    def test_update_delete_abort(self):
+        # Like test_update_delete_commit, but instead of committing we
+        # destroy the updater, which should roll the changes back.
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+
+        # first make sure, through a separate finder, that some record exists
+        result, finder = dsc.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual(isc.dns.RRClass.IN(), finder.get_class())
+        self.assertEqual("example.com.", finder.get_origin().to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        rrset_to_delete = rrset;
+
+        # can't delete rrset with associated sig. Abuse that to force an
+        # exception first, then remove the sig, then delete the record
+        updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+        self.assertRaises(isc.datasrc.Error, updater.delete_rrset,
+                          rrset_to_delete)
+
+        rrset_to_delete.remove_rrsig()
+
+        updater.delete_rrset(rrset_to_delete)
+
+        # The record should be gone in the updater, but not in the original
+        # finder (since we have not committed)
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(),
+                                     None,
+                                     finder.FIND_DEFAULT)
+        self.assertEqual(finder.NXDOMAIN, result)
+        self.assertEqual(None, rrset)
+
+        # destroy the updater, which should make it roll back
+        updater = None
+
+        # the record should still be available in the 'real' finder as well
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(),
+                                    None,
+                                    finder.FIND_DEFAULT)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+    def test_update_for_no_zone(self):
+        # get_updater() returns None for a zone the data source doesn't have
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        self.assertEqual(None,
+                         dsc.get_updater(isc.dns.Name("notexistent.example"),
+                                         True))
+
+    def test_client_reference(self):
+        # Temporarily create various objects using factory methods of the
+        # client.  The created objects won't be stored anywhere and
+        # immediately released.  The creation shouldn't affect the reference
+        # to the base client.
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        orig_ref = sys.getrefcount(dsc)
+
+        dsc.find_zone(isc.dns.Name("example.com"))
+        self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+        dsc.get_iterator(isc.dns.Name("example.com."))
+        self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+        dsc.get_updater(isc.dns.Name("example.com"), True)
+        self.assertEqual(orig_ref, sys.getrefcount(dsc))
+
+    def test_iterate_over_empty_zone(self):
+        # empty the test zone first (replace-mode updater with no adds,
+        # then commit)
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        updater = dsc.get_updater(isc.dns.Name("example.com"), True)
+        updater.commit()
+
+        # Check the iterator behavior for the empty zone.
+        iterator = dsc.get_iterator(isc.dns.Name("example.com."))
+        self.assertEqual(None, iterator.get_soa())
+        self.assertEqual(None, iterator.get_next_rrset())
+
+class JournalWrite(unittest.TestCase):
+    # Tests for writing zone difference (journal) data through an updater
+    # created with the third get_updater() argument set to True; results
+    # are checked by reading the sqlite3 'diffs' table directly.
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+        self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+                                                WRITE_ZONE_DB_CONFIG)
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+
+    def tearDown(self):
+        self.dsc = None
+        self.updater = None
+
+    def check_journal(self, expected_list):
+        # This assumes sqlite3 DB and directly fetches stored data from
+        # the DB file.  It should be generalized using ZoneJournalReader
+        # once it's supported.
+        conn = sqlite3.connect(WRITE_ZONE_DB_FILE)
+        cur = conn.cursor()
+        cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+        actual_list = cur.fetchall()
+        self.assertEqual(len(expected_list), len(actual_list))
+        for (expected, actual) in zip(expected_list, actual_list):
+            self.assertEqual(expected, actual)
+        conn.close()
+
+    def create_a(self, address):
+        # Helper: build an A RRset for www.example.org with the given address.
+        a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+                     RRTTL(3600))
+        a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+        return (a_rr)
+
+    def test_journal_write(self):
+        # This is a straightforward port of the C++ 'journal' test
+        # Note: we add/delete 'out of zone' data (example.org in the
+        # example.com zone) for convenience.
+        self.updater.delete_rrset(create_soa(1234))
+        self.updater.delete_rrset(self.create_a('192.0.2.2'))
+        self.updater.add_rrset(create_soa(1235))
+        self.updater.add_rrset(self.create_a('192.0.2.2'))
+        self.updater.commit()
+
+        expected = []
+        expected.append(("example.org.", "SOA", 3600,
+                         "ns1.example.org. admin.example.org. " +
+                         "1234 3600 1800 2419200 7200"))
+        expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+        expected.append(("example.org.", "SOA", 3600,
+                         "ns1.example.org. admin.example.org. " +
+                         "1235 3600 1800 2419200 7200"))
+        expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+        self.check_journal(expected)
+
+    def test_journal_write_multiple(self):
+        # This is a straightforward port of the C++ 'journalMultiple' test
+        expected = []
+        for i in range(1, 100):
+            self.updater.delete_rrset(create_soa(1234 + i - 1))
+            expected.append(("example.org.", "SOA", 3600,
+                             "ns1.example.org. admin.example.org. " +
+                             str(1234 + i - 1) + " 3600 1800 2419200 7200"))
+            self.updater.add_rrset(create_soa(1234 + i))
+            expected.append(("example.org.", "SOA", 3600,
+                             "ns1.example.org. admin.example.org. " +
+                             str(1234 + i) + " 3600 1800 2419200 7200"))
+        self.updater.commit()
+        self.check_journal(expected)
+
+    def test_journal_write_bad_sequence(self):
+        # This is a straightforward port of the C++ 'journalBadSequence' test
+
+        # Delete A before SOA
+        self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+                          self.create_a('192.0.2.1'))
+        # Add before delete
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          create_soa(1234))
+        # Add A before SOA
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          self.create_a('192.0.2.1'))
+        # Commit before add
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.commit)
+        # Delete two SOAs
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+                          create_soa(1235))
+        # Add two SOAs
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(create_soa(1234))
+        self.updater.add_rrset(create_soa(1235))
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          create_soa(1236))
+
+    def test_journal_write_onerase(self):
+        # Journaling is incompatible with a replace-mode (True) updater
+        self.updater = None
+        self.assertRaises(isc.datasrc.Error, self.dsc.get_updater,
+                          Name("example.com"), True, True)
+
+    def test_journal_write_badparam(self):
+        # Wrong argument types for get_updater() must raise TypeError
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        self.assertRaises(TypeError, dsc.get_updater, 0, False, True)
+        self.assertRaises(TypeError, dsc.get_updater, Name('example.com'),
+                          False, 0)
+        self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
+                          1, True)
+
+class JournalRead(unittest.TestCase):
+    # Tests for reading journal (diff) data back via
+    # DataSourceClient.get_journal_reader() / ZoneJournalReader.
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        self.zname = Name('example.com')
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+        self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+                                                WRITE_ZONE_DB_CONFIG)
+        self.reader = None
+
+    def tearDown(self):
+        # Some tests leave the reader in the middle of sequence, holding
+        # the lock.  Since the unittest framework keeps each test object
+        # until the end of the entire tests, we need to make sure the reader
+        # is released at the end of each test.  The client shouldn't do harm
+        # but we clean it up, too, just in case.
+        self.dsc = None
+        self.reader = None
+
+    def make_simple_diff(self, begin_soa):
+        # Helper: record one diff sequence in the journal
+        # (delete begin_soa, add SOA with serial 1235).
+        updater = self.dsc.get_updater(self.zname, False, True)
+        updater.delete_rrset(begin_soa)
+        updater.add_rrset(create_soa(1235))
+        updater.commit()
+
+    def test_journal_reader(self):
+        # This is a straightforward port of the C++ 'journalReader' test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.SUCCESS, result)
+        self.assertNotEqual(None, self.reader)
+        rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+        rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+        self.assertEqual(None, self.reader.get_next_diff())
+        # reading past the end of the sequence raises ValueError
+        self.assertRaises(ValueError, self.reader.get_next_diff)
+
+    def test_journal_reader_with_large_serial(self):
+        # similar to the previous one, but use a very large serial to check
+        # if the python wrapper code has unexpected integer overflow
+        self.make_simple_diff(create_soa(4294967295))
+        result, self.reader = self.dsc.get_journal_reader(self.zname,
+                                                          4294967295, 1235)
+        self.assertNotEqual(None, self.reader)
+        # dump to text and compare them in case create_soa happens to have
+        # an overflow bug
+        self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+                         'admin.example.org. 4294967295 3600 1800 ' + \
+                             '2419200 7200\n',
+                         self.reader.get_next_diff().to_text())
+
+    def test_journal_reader_large_journal(self):
+        # This is a straightforward port of the C++ 'readLargeJournal' test.
+        # In this test we use the ZoneJournalReader object as a Python
+        # iterator.
+        updater = self.dsc.get_updater(self.zname, False, True)
+        expected = []
+        for i in range(0, 100):
+            rrset = create_soa(1234 + i)
+            updater.delete_rrset(rrset)
+            expected.append(rrset)
+
+            rrset = create_soa(1234 + i + 1)
+            updater.add_rrset(rrset)
+            expected.append(rrset)
+
+        updater.commit()
+        _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+        self.assertNotEqual(None, self.reader)
+        i = 0
+        for rr in self.reader:
+            self.assertNotEqual(len(expected), i)
+            rrsets_equal(expected[i], rr)
+            i += 1
+        self.assertEqual(len(expected), i)
+
+    def test_journal_reader_no_range(self):
+        # This is a straightforward port of the C++ 'readJournalForNoRange'
+        # test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_no_zone(self):
+        # This is a straightforward port of the C++ 'journalReaderForNXZone'
+        # test
+        result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+                                                          0, 1)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_bad_params(self):
+        # Wrong argument types must raise TypeError
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          'example.com.', 0, 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 'must be int', 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 0, 'must be int')
+
+    def test_journal_reader_direct_construct(self):
+        # ZoneJournalReader can only be constructed via a factory
+        self.assertRaises(TypeError, ZoneJournalReader)
+
+    def test_journal_reader_old_schema(self):
+        # The database doesn't have a "diffs" table.
+        dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
+        client = isc.datasrc.DataSourceClient("sqlite3",
+                                              "{ \"database_file\": \"" + \
+                                                  dbfile + "\" }")
+        self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
+                          self.zname, 0, 1)
+
+if __name__ == "__main__":
+    # Initialize the isc logging system before running the tests.
+    isc.log.init("bind10")
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
index 707994f..10c61cf 100644
--- a/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
+++ b/src/lib/python/isc/datasrc/tests/sqlite3_ds_test.py
@@ -23,8 +23,9 @@ TESTDATA_PATH = os.environ['TESTDATA_PATH'] + os.sep
 TESTDATA_WRITE_PATH = os.environ['TESTDATA_WRITE_PATH'] + os.sep
 
 READ_ZONE_DB_FILE = TESTDATA_PATH + "example.com.sqlite3"
-WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
 BROKEN_DB_FILE = TESTDATA_PATH + "brokendb.sqlite3"
+WRITE_ZONE_DB_FILE = TESTDATA_WRITE_PATH + "example.com.out.sqlite3"
+NEW_DB_FILE = TESTDATA_WRITE_PATH + "new_db.sqlite3"
 
 def example_reader():
     my_zone = [
@@ -91,5 +92,52 @@ class TestSqlite3_ds(unittest.TestCase):
         # and make sure lock does not stay
         sqlite3_ds.load(WRITE_ZONE_DB_FILE, ".", example_reader)
 
+class NewDBFile(unittest.TestCase):
+    def tearDown(self):
+        # remove the created database after every test
+        if (os.path.exists(NEW_DB_FILE)):
+            os.remove(NEW_DB_FILE)
+
+    def setUp(self):
+        # remove the created database before every test too, just
+        # in case a test got aborted half-way, and cleanup didn't occur
+        if (os.path.exists(NEW_DB_FILE)):
+            os.remove(NEW_DB_FILE)
+
+    def test_new_db(self):
+        self.assertFalse(os.path.exists(NEW_DB_FILE))
+        sqlite3_ds.open(NEW_DB_FILE)
+        self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+    def test_new_db_locked(self):
+        self.assertFalse(os.path.exists(NEW_DB_FILE))
+        con = sqlite3.connect(NEW_DB_FILE);
+        con.isolation_level = None
+        cur = con.cursor()
+        cur.execute("BEGIN IMMEDIATE TRANSACTION")
+
+        # load should now fail, since the database is locked,
+        # and the open() call needs an exclusive lock
+        self.assertRaises(sqlite3.OperationalError,
+                          sqlite3_ds.open, NEW_DB_FILE, 0.1)
+
+        con.rollback()
+        cur.close()
+        con.close()
+        self.assertTrue(os.path.exists(NEW_DB_FILE))
+
+        # now that we closed our connection, load should work again
+        sqlite3_ds.open(NEW_DB_FILE)
+
+        # the database should now have been created, and a new load should
+        # not require an exclusive lock anymore, so we lock it again
+        con = sqlite3.connect(NEW_DB_FILE);
+        cur = con.cursor()
+        cur.execute("BEGIN IMMEDIATE TRANSACTION")
+        sqlite3_ds.open(NEW_DB_FILE, 0.1)
+        con.rollback()
+        cur.close()
+        con.close()
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/python/isc/datasrc/updater_inc.cc b/src/lib/python/isc/datasrc/updater_inc.cc
new file mode 100644
index 0000000..32715ec
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_inc.cc
@@ -0,0 +1,181 @@
+namespace {
+
+const char* const ZoneUpdater_doc = "\
+The base class to make updates to a single zone.\n\
+\n\
+On construction, each derived class object will start a\n\
+\"transaction\" for making updates to a specific zone (this means a\n\
+constructor of a derived class would normally take parameters to\n\
+identify the zone to be updated). The underlying realization of a\n\
+\"transaction\" will differ for different derived classes; if it uses\n\
+a general purpose database as a backend, it will involve performing\n\
+some form of \"begin transaction\" statement for the database.\n\
+\n\
+Updates (adding or deleting RRs) are made via add_rrset() and\n\
+delete_rrset() methods. Until the commit() method is called the\n\
+changes are local to the updater object. For example, they won't be\n\
+visible via a ZoneFinder object, but only by the updater's own find()\n\
+method. The commit() completes the transaction and makes the changes\n\
+visible to others.\n\
+\n\
+This class does not provide an explicit \"rollback\" interface. If\n\
+something wrong or unexpected happens during the updates and the\n\
+caller wants to cancel the intermediate updates, the caller should\n\
+simply destroy the updater object without calling commit(). The\n\
+destructor is supposed to perform the \"rollback\" operation,\n\
+depending on the internal details of the derived class.\n\
+\n\
+This initial implementation provides a quite simple interface of\n\
+adding and deleting RRs (see the description of the related methods).\n\
+It may be revisited as we gain more experiences.\n\
+\n\
+";
+
+const char* const ZoneUpdater_addRRset_doc = "\
+add_rrset(rrset) -> No return value\n\
+\n\
+Add an RRset to a zone via the updater.\n\
+It performs a few basic checks:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG, i.e., whether\n\
+  get_rrsig() on the RRset returns a NULL pointer.\n\
+\n\
+and otherwise does not check any oddity. For example, it doesn't check\n\
+whether the owner name of the specified RRset is a subdomain of the\n\
+zone's origin; it doesn't care whether or not there is already an\n\
+RRset of the same name and RR type in the zone, and if there is,\n\
+whether any of the existing RRs have duplicate RDATA with the added\n\
+ones. If these conditions matter the calling application must examine\n\
+the existing data beforehand using the ZoneFinder returned by\n\
+get_finder().\n\
+\n\
+The validation requirement on the associated RRSIG is temporary. If we\n\
+find it more reasonable and useful to allow adding a pair of RRset and\n\
+its RRSIG RRset as we gain experiences with the interface, we may\n\
+remove this restriction. Until then we explicitly check it to prevent\n\
+accidental misuse.\n\
+\n\
+Conceptually, on successful call to this method, the zone will have\n\
+the specified RRset, and if there is already an RRset of the same name\n\
+and RR type, these two sets will be \"merged\". \"Merged\" means that\n\
+a subsequent call to ZoneFinder.find() for the name and type will\n\
+result in success and the returned RRset will contain all previously\n\
+existing and newly added RDATAs with the TTL being the minimum of the\n\
+two RRsets. The underlying representation of the \"merged\" RRsets may\n\
+vary depending on the characteristic of the underlying data source.\n\
+For example, if it uses a general purpose database that stores each RR\n\
+of the same RRset separately, it may simply be a larger set of RRs\n\
+based on both the existing and added RRsets; the TTLs of the RRs may\n\
+be different within the database, and there may even be duplicate RRs\n\
+in different database rows. As long as the RRset returned via\n\
+ZoneFinder.find() conforms to the concept of \"merge\", the actual\n\
+internal representation is up to the implementation.\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit() the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if there is already a\n\
+  duplicate RR (that has the same RDATA).\n\
+- we may want to check (and maybe reject) if there is already an RRset\n\
+  of the same name and RR type with different TTL\n\
+- we may even want to check if there is already any RRset of the same\n\
+  name and RR type.\n\
+- we may want to add an \"options\" parameter that can control the\n\
+  above points\n\
+- we may want to have this method return a value containing the\n\
+  information on whether there's a duplicate, etc.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+                    internal data source error, or wrapper error\n\
+\n\
+Parameters:\n\
+  rrset      The RRset to be added\n\
+\n\
+";
+
+const char* const ZoneUpdater_deleteRRset_doc = "\
+delete_rrset(rrset) -> No return value\n\
+\n\
+Delete an RRset from a zone via the updater.\n\
+\n\
+Like add_rrset(), the detailed semantics and behavior of this method\n\
+may have to be revisited in a future version. The following are based\n\
+on the initial implementation decisions.\n\
+\n\
+- Existing RRs that don't match any of the specified RDATAs will\n\
+  remain in the zone.\n\
+- Any RRs of the specified RRset that don't exist in the zone will\n\
+  simply be ignored; the implementation of this method is not supposed\n\
+  to check that condition.\n\
+- The TTL of the RRset is ignored; matching is only performed by the\n\
+  owner name, RR type and RDATA\n\
+\n\
+Ignoring the TTL may not look sensible, but it's based on the\n\
+observation that it will result in more intuitive result, especially\n\
+when the underlying data source is a general purpose database. See\n\
+also the c++ documentation of DatabaseAccessor::DeleteRecordInZone()\n\
+on this point. It also matches the dynamic update protocol (RFC2136),\n\
+where TTLs are ignored when deleting RRs.\n\
+\n\
+This method performs a limited level of validation on the specified\n\
+RRset:\n\
+- Whether the RR class is identical to that for the zone to be updated\n\
+- Whether the RRset is not empty, i.e., it has at least one RDATA\n\
+- Whether the RRset is not associated with an RRSIG\n\
+\n\
+This method must not be called once commit() is performed. If it is\n\
+called after commit() the implementation must throw an\n\
+isc.datasrc.Error exception.\n\
+\n\
+Todo: As noted above we may have to revisit the design details as we\n\
+gain experiences:\n\
+\n\
+- we may want to check (and maybe reject) if some or all of the RRs\n\
+  for the specified RRset don't exist in the zone\n\
+- we may want to allow an option to \"delete everything\" for\n\
+  specified name and/or specified name + RR type.\n\
+- as mentioned above, we may want to include the TTL in matching the\n\
+  deleted RRs\n\
+- we may want to add an \"options\" parameter that can control the\n\
+  above points\n\
+- we may want to have this method return a value containing the\n\
+  information on whether there's any RRs that are specified but don't\n\
+  exist, the number of actually deleted RRs, etc.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.Error Called after commit(), RRset is invalid (see above),\n\
+             internal data source error\n\
+  std.bad_alloc Resource allocation failure\n\
+\n\
+Parameters:\n\
+  rrset      The RRset to be deleted\n\
+\n\
+";
+
+const char* const ZoneUpdater_commit_doc = "\
+commit() -> void\n\
+\n\
+Commit the updates made in the updater to the zone.\n\
+\n\
+This method completes the \"transaction\" started at the creation of\n\
+the updater. After successful completion of this method, the updates\n\
+will be visible outside the scope of the updater. The actual internal\n\
+behavior will differ for different derived classes. For a derived class\n\
+with a general purpose database as a backend, for example, this method\n\
+would perform a \"commit\" statement for the database.\n\
+\n\
+This operation can only be performed at most once. A duplicate call\n\
+must result in a isc.datasrc.Error exception.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.Error Duplicate call of the method, internal data source\n\
+             error, or wrapper error\n\
+\n\
+";
+} // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/updater_python.cc b/src/lib/python/isc/datasrc/updater_python.cc
new file mode 100644
index 0000000..29d2ffe
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.cc
@@ -0,0 +1,288 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Enable this if you use s# variants with PyArg_ParseTuple(), see
+// http://docs.python.org/py3k/c-api/arg.html#strings-and-buffers
+//#define PY_SSIZE_T_CLEAN
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+#include <datasrc/data_source.h>
+#include <datasrc/sqlite3_accessor.h>
+#include <datasrc/zone.h>
+
+#include <dns/python/name_python.h>
+#include <dns/python/rrset_python.h>
+#include <dns/python/rrclass_python.h>
+#include <dns/python/rrtype_python.h>
+
+#include "datasrc.h"
+#include "updater_python.h"
+
+#include "updater_inc.cc"
+#include "finder_inc.cc"
+
+using namespace std;
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace isc_datasrc_internal {
+// See finder_python.cc
+PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args);
+}
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneUpdater : public PyObject {
+public:
+    s_ZoneUpdater() : cppobj(ZoneUpdaterPtr()), base_obj(NULL) {};
+    ZoneUpdaterPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we use INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// Shortcut type which would be convenient for adding class variables safely.
+typedef CPPPyObjectContainer<s_ZoneUpdater, ZoneUpdater> ZoneUpdaterContainer;
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+
+// General creation and destruction
+int
+ZoneUpdater_init(s_ZoneUpdater* self, PyObject* args) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneUpdater cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneUpdater_destroy(s_ZoneUpdater* const self) {
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+ZoneUpdater_addRRset(PyObject* po_self, PyObject* args) {
+    s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+    PyObject* rrset_obj;
+    if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+        try {
+            self->cppobj->addRRset(PyRRset_ToRRset(rrset_obj));
+            Py_RETURN_NONE;
+        } catch (const DataSourceError& dse) {
+            PyErr_SetString(getDataSourceException("Error"), dse.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneUpdater_deleteRRset(PyObject* po_self, PyObject* args) {
+    s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+    PyObject* rrset_obj;
+    if (PyArg_ParseTuple(args, "O!", &rrset_type, &rrset_obj)) {
+        try {
+            self->cppobj->deleteRRset(PyRRset_ToRRset(rrset_obj));
+            Py_RETURN_NONE;
+        } catch (const DataSourceError& dse) {
+            PyErr_SetString(getDataSourceException("Error"), dse.what());
+            return (NULL);
+        } catch (const std::exception& exc) {
+            PyErr_SetString(getDataSourceException("Error"), exc.what());
+            return (NULL);
+        }
+    } else {
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneUpdater_commit(PyObject* po_self, PyObject*) {
+    s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+    try {
+        self->cppobj->commit();
+        Py_RETURN_NONE;
+    } catch (const DataSourceError& dse) {
+        PyErr_SetString(getDataSourceException("Error"), dse.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneUpdater_getClass(PyObject* po_self, PyObject*) {
+    s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+    try {
+        return (createRRClassObject(self->cppobj->getFinder().getClass()));
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneUpdater_getOrigin(PyObject* po_self, PyObject*) {
+    s_ZoneUpdater* self = static_cast<s_ZoneUpdater*>(po_self);
+    try {
+        return (createNameObject(self->cppobj->getFinder().getOrigin()));
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneUpdater_find(PyObject* po_self, PyObject* args) {
+    s_ZoneUpdater* const self = static_cast<s_ZoneUpdater*>(po_self);
+    return (isc_datasrc_internal::ZoneFinder_helper(&self->cppobj->getFinder(),
+                                                    args));
+}
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef ZoneUpdater_methods[] = {
+    { "add_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_addRRset),
+      METH_VARARGS, ZoneUpdater_addRRset_doc },
+    { "delete_rrset", reinterpret_cast<PyCFunction>(ZoneUpdater_deleteRRset),
+      METH_VARARGS, ZoneUpdater_deleteRRset_doc },
+    { "commit", reinterpret_cast<PyCFunction>(ZoneUpdater_commit), METH_NOARGS,
+      ZoneUpdater_commit_doc },
+    // Instead of a getFinder, we implement the finder functionality directly.
+    // This is because ZoneFinder is non-copyable, and we should not create
+    // a ZoneFinder object from a reference only (which is what is returned
+    // by getFinder()).
+    { "get_origin", reinterpret_cast<PyCFunction>(ZoneUpdater_getOrigin),
+      METH_NOARGS, ZoneFinder_getOrigin_doc },
+    { "get_class", reinterpret_cast<PyCFunction>(ZoneUpdater_getClass),
+      METH_NOARGS, ZoneFinder_getClass_doc },
+    { "find", reinterpret_cast<PyCFunction>(ZoneUpdater_find), METH_VARARGS,
+      ZoneFinder_find_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject zoneupdater_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneUpdater",
+    sizeof(s_ZoneUpdater),              // tp_basicsize
+    0,                                  // tp_itemsize
+    reinterpret_cast<destructor>(ZoneUpdater_destroy),// tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneUpdater_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    ZoneUpdater_methods,                // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    reinterpret_cast<initproc>(ZoneUpdater_init),// tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+                        PyObject* base_obj)
+{
+    s_ZoneUpdater* py_zu = static_cast<s_ZoneUpdater*>(
+        zoneupdater_type.tp_alloc(&zoneupdater_type, 0));
+    if (py_zu != NULL) {
+        py_zu->cppobj = source;
+        py_zu->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (py_zu);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+
diff --git a/src/lib/python/isc/datasrc/updater_python.h b/src/lib/python/isc/datasrc/updater_python.h
new file mode 100644
index 0000000..8228578
--- /dev/null
+++ b/src/lib/python/isc/datasrc/updater_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_UPDATER_H
+#define __PYTHON_DATASRC_UPDATER_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace datasrc {
+class DataSourceClient;
+
+namespace python {
+
+
+extern PyTypeObject zoneupdater_type;
+
+/// \brief Create a ZoneUpdater python object
+///
+/// \param source The zone updater pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneUpdater depends on.
+///                 Its refcount is increased, and will be decreased when
+///                 this zone updater is destroyed, making sure that the
+///                 base object is never destroyed before this zone updater.
+PyObject* createZoneUpdaterObject(isc::datasrc::ZoneUpdaterPtr source,
+                                  PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_UPDATER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/dns/Makefile.am b/src/lib/python/isc/dns/Makefile.am
new file mode 100644
index 0000000..b31da93
--- /dev/null
+++ b/src/lib/python/isc/dns/Makefile.am
@@ -0,0 +1,8 @@
+python_PYTHON = __init__.py
+pythondir = $(pyexecdir)/isc/dns
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
+
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index aa12664..2e4a28f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -28,7 +28,11 @@
 #include <string>
 #include <boost/bind.hpp>
 
+#include <util/python/pycppwrapper_util.h>
+#include <log/log_dbglevels.h>
+
 using namespace isc::log;
+using namespace isc::util::python;
 using std::string;
 using boost::bind;
 
@@ -185,7 +189,7 @@ init(PyObject*, PyObject* args) {
     Py_RETURN_NONE;
 }
 
-// This initialization is for unit tests.  It allows message settings to be
+// This initialization is for unit tests.  It allows message settings to
 // be determined by a set of B10_xxx environment variables.  (See the
 // description of initLogger() for more details.)  The function has been named
 // resetUnitTestRootLogger() here as being more descriptive and
@@ -299,7 +303,8 @@ public:
 extern PyTypeObject logger_type;
 
 int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* name;
     if (!PyArg_ParseTuple(args, "s", &name)) {
         return (-1);
@@ -319,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
 }
 
 void
-Logger_destroy(LoggerWrapper* const self) {
+//Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     delete self->logger_;
     self->logger_ = NULL;
     Py_TYPE(self)->tp_free(self);
@@ -347,7 +354,8 @@ severityToText(const Severity& severity) {
 }
 
 PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("s",
                               severityToText(
@@ -364,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
     }
@@ -379,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* severity;
     int dbgLevel = 0;
     if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -421,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
 }
 
 PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
 }
 
 PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
 }
 
 PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
 }
 
 PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
 }
 
 PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     int level = MIN_DEBUG_LEVEL;
     if (!PyArg_ParseTuple(args, "|i", &level)) {
         return (NULL);
@@ -466,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
 
 string
 objectToStr(PyObject* object, bool convert) {
-    PyObject* cleanup(NULL);
+    PyObjectContainer objstr_container;
     if (convert) {
-        object = cleanup = PyObject_Str(object);
-        if (object == NULL) {
+        PyObject* text_obj = PyObject_Str(object);
+        if (text_obj == NULL) {
+            // PyObject_Str could fail for various reasons, including because
+            // the object cannot be converted to a string.  We exit with
+            // InternalError to preserve the PyErr set in PyObject_Str.
             throw InternalError();
         }
-    }
-    const char* value;
-    PyObject* tuple(Py_BuildValue("(O)", object));
-    if (tuple == NULL) {
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
-        throw InternalError();
+        objstr_container.reset(text_obj);
+        object = objstr_container.get();
     }
 
-    if (!PyArg_ParseTuple(tuple, "s", &value)) {
-        Py_DECREF(tuple);
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
+    PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+    const char* value;
+    if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
         throw InternalError();
     }
-    string result(value);
-    Py_DECREF(tuple);
-    if (cleanup != NULL) {
-        Py_DECREF(cleanup);
-    }
-    return (result);
+    return (string(value));
 }
 
 // Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
 PyObject*
 Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
     try {
-        Py_ssize_t number(PyObject_Length(args));
+        const Py_ssize_t number(PyObject_Length(args));
         if (number < 0) {
             return (NULL);
         }
 
         // Which argument is the first to format?
-        size_t start(1);
-        if (dbgLevel) {
-            start ++;
-        }
-
+        const size_t start = dbgLevel ? 2 : 1;
         if (number < start) {
             return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
                                  "logging call, at least %zu needed and %zd "
@@ -520,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         }
 
         // Extract the fixed arguments
-        PyObject *midO(PySequence_GetItem(args, start - 1));
-        if (midO == NULL) {
-            return (NULL);
-        }
-        string mid(objectToStr(midO, false));
         long dbg(0);
         if (dbgLevel) {
-            PyObject *dbgO(PySequence_GetItem(args, 0));
-            if (dbgO == NULL) {
-                return (NULL);
-            }
-            dbg = PyLong_AsLong(dbgO);
+            PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+            dbg = PyLong_AsLong(dbg_container.get());
             if (PyErr_Occurred()) {
                 return (NULL);
             }
@@ -540,16 +533,16 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         // We create the logging message right now. If we fail to convert a
         // parameter to string, at least the part that we already did will
         // be output
+        PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+        const string mid(objectToStr(msgid_container.get(), false));
         Logger::Formatter formatter(function(dbg, mid.c_str()));
 
         // Now process the rest of parameters, convert each to string and put
         // into the formatter. It will print itself in the end.
         for (size_t i(start); i < number; ++ i) {
-            PyObject* param(PySequence_GetItem(args, i));
-            if (param == NULL) {
-                return (NULL);
-            }
-            formatter = formatter.arg(objectToStr(param, true));
+            PyObjectContainer param_container(PySequence_GetItem(args, i));
+            formatter = formatter.arg(objectToStr(param_container.get(),
+                                                  true));
         }
         Py_RETURN_NONE;
     }
@@ -569,72 +562,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
 // Now map the functions into the performOutput. I wish C++ could do
 // functional programming.
 PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
                                  args, true));
 }
 
 PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
                                  args, false));
 }
 
 PyMethodDef loggerMethods[] = {
-    { "get_effective_severity",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
-        METH_NOARGS, "Returns the effective logging severity as string" },
-    { "get_effective_debug_level",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
-        METH_NOARGS, "Returns the current debug level." },
-    { "set_severity",
-        reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+    { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+        "Returns the effective logging severity as string" },
+    { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+        "Returns the current debug level." },
+    { "set_severity", Logger_setSeverity, METH_VARARGS,
         "Sets the severity of a logger. The parameters are severity as a "
         "string and, optionally, a debug level (integer in range 0-99). "
         "The severity may be NULL, in which case an inherited value is taken."
     },
-    { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
-        METH_VARARGS, "Returns if the logger would log debug message now. "
+    { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+      "Returns if the logger would log debug message now. "
             "You can provide a desired debug level." },
-    { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
-        METH_NOARGS, "Returns if the logger would log info message now." },
-    { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
-        METH_NOARGS, "Returns if the logger would log warn message now." },
-    { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
-        METH_NOARGS, "Returns if the logger would log error message now." },
-    { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
-        METH_NOARGS, "Returns if the logger would log fatal message now." },
-    { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+    { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+      "Returns if the logger would log info message now." },
+    { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+      "Returns if the logger would log warn message now." },
+    { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+      "Returns if the logger would log error message now." },
+    { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+      "Returns if the logger would log fatal message now." },
+    { "debug", Logger_debug, METH_VARARGS,
         "Logs a debug-severity message. It takes the debug level, message ID "
         "and any number of stringifiable arguments to the message." },
-    { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+    { "info", Logger_info, METH_VARARGS,
         "Logs a info-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+    { "warn", Logger_warn, METH_VARARGS,
         "Logs a warn-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+    { "error", Logger_error, METH_VARARGS,
         "Logs a error-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+    { "fatal", Logger_fatal, METH_VARARGS,
         "Logs a fatal-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
     { NULL, NULL, 0, NULL }
@@ -645,7 +640,7 @@ PyTypeObject logger_type = {
     "isc.log.Logger",
     sizeof(LoggerWrapper),                 // tp_basicsize
     0,                                  // tp_itemsize
-    reinterpret_cast<destructor>(Logger_destroy),       // tp_dealloc
+    Logger_destroy,                     // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -677,7 +672,7 @@ PyTypeObject logger_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    reinterpret_cast<initproc>(Logger_init),            // tp_init
+    Logger_init,                        // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
@@ -714,16 +709,52 @@ PyInit_log(void) {
         return (NULL);
     }
 
-    if (PyType_Ready(&logger_type) < 0) {
-        return (NULL);
-    }
+    // Finalize logger class and add in the definitions of the standard debug
+    // levels.  These can then be referred to in Python through the constants
+    // log.DBGLVL_XXX.
+    // N.B. These should be kept in sync with the constants defined in
+    // log_dbglevels.h.
+    try {
+        if (PyType_Ready(&logger_type) < 0) {
+            throw InternalError();
+        }
+        void* p = &logger_type;
+        if (PyModule_AddObject(mod, "Logger",
+                               static_cast<PyObject*>(p)) < 0) {
+            throw InternalError();
+        }
 
-    if (PyModule_AddObject(mod, "Logger",
-                           static_cast<PyObject*>(static_cast<void*>(
-                               &logger_type))) < 0) {
+        installClassVariable(logger_type, "DBGLVL_START_SHUT",
+                             Py_BuildValue("I", DBGLVL_START_SHUT));
+        installClassVariable(logger_type, "DBGLVL_COMMAND",
+                             Py_BuildValue("I", DBGLVL_COMMAND));
+        installClassVariable(logger_type, "DBGLVL_COMMAND_DATA",
+                             Py_BuildValue("I", DBGLVL_COMMAND_DATA));
+        installClassVariable(logger_type, "DBGLVL_TRACE_BASIC",
+                             Py_BuildValue("I", DBGLVL_TRACE_BASIC));
+        installClassVariable(logger_type, "DBGLVL_TRACE_BASIC_DATA",
+                             Py_BuildValue("I", DBGLVL_TRACE_BASIC_DATA));
+        installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL",
+                             Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
+        installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
+                             Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+    } catch (const InternalError&) {
+        Py_DECREF(mod);
+        return (NULL);
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in Log initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+        Py_DECREF(mod);
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+                        "Unexpected failure in Log initialization");
+        Py_DECREF(mod);
         return (NULL);
     }
-    Py_INCREF(&logger_type);
 
+    Py_INCREF(&logger_type);
     return (mod);
 }
diff --git a/src/lib/python/isc/log/tests/Makefile.am b/src/lib/python/isc/log/tests/Makefile.am
index 6bb67de..170eee6 100644
--- a/src/lib/python/isc/log/tests/Makefile.am
+++ b/src/lib/python/isc/log/tests/Makefile.am
@@ -1,28 +1,40 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
-PYTESTS = log_test.py
-EXTRA_DIST = $(PYTESTS) log_console.py.in console.out check_output.sh
+PYTESTS_GEN = log_console.py
+PYTESTS_NOGEN = log_test.py
+noinst_SCRIPTS = $(PYTESTS_GEN)
+EXTRA_DIST = console.out check_output.sh $(PYTESTS_NOGEN)
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
+# We need to run the test cycle twice: once for the generated files in builddir, once for the static ones in srcdir
 check-local:
+	chmod +x $(abs_builddir)/log_console.py
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log \
 	$(abs_srcdir)/check_output.sh $(abs_builddir)/log_console.py $(abs_srcdir)/console.out
 if ENABLE_PYTHON_COVERAGE
 	touch $(abs_top_srcdir)/.coverage
 	rm -f .coverage
 	${LN_S} $(abs_top_srcdir)/.coverage .coverage
 endif
-	for pytest in $(PYTESTS) ; do \
+	for pytest in $(PYTESTS_NOGEN) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
 	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+	done ; \
+	for pytest in $(PYTESTS_GEN) ; do \
+	echo Running test: $$pytest ; \
+	chmod +x $(abs_builddir)/$$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/log/python/.libs \
+	B10_TEST_PLUGIN_DIR=$(abs_top_srcdir)/src/bin/cfgmgr/plugins \
+	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 4292b6c..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
 import isc.log
 import unittest
 import json
+import sys
 import bind10_config
 from isc.config.ccsession import path_search
 
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
     def setUp(self):
         isc.log.init("root", "DEBUG", 50)
         self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+        self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
 
     # Checks defaults of the logger
     def defaults(self, logger):
@@ -159,5 +161,44 @@ class Logger(unittest.TestCase):
         # Bad type
         self.assertRaises(TypeError, logger.debug, "42", "hello")
 
+    def test_dbglevel_constants(self):
+        """
+            Just check a constant to make sure it is defined and is the
+            correct value.  (The constant chosen has a non-zero value to
+            ensure that the code has both defined the constant and set its
+            value correctly.)
+        """
+        logger = isc.log.Logger("child")
+        self.assertEqual(logger.DBGLVL_COMMAND, 10)
+
+    def test_param_reference(self):
+        """
+        Check whether passing a parameter to a logger causes a reference leak.
+        """
+        class LogParam:
+            def __str__(self):
+                return 'LogParam'
+        logger = isc.log.Logger("child")
+        param = LogParam()
+        orig_msgrefcnt = sys.getrefcount(param)
+        orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+        logger.info(self.TEST_MSG, param);
+        self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+        # intentionally pass an invalid type for debug level.  It will
+        # result in TypeError.  The passed object still shouldn't leak a
+        # reference.
+        self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+    def test_bad_parameter(self):
+        # a log parameter cannot be converted to a string object.
+        class LogParam:
+            def __str__(self):
+                raise ValueError("LogParam can't be converted to string")
+        logger = isc.log.Logger("child")
+        self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
 if __name__ == '__main__':
     unittest.main()
diff --git a/src/lib/python/isc/log_messages/Makefile.am b/src/lib/python/isc/log_messages/Makefile.am
new file mode 100644
index 0000000..30f8374
--- /dev/null
+++ b/src/lib/python/isc/log_messages/Makefile.am
@@ -0,0 +1,32 @@
+SUBDIRS = work
+
+EXTRA_DIST = __init__.py
+EXTRA_DIST += bind10_messages.py
+EXTRA_DIST += cmdctl_messages.py
+EXTRA_DIST += stats_messages.py
+EXTRA_DIST += stats_httpd_messages.py
+EXTRA_DIST += xfrin_messages.py
+EXTRA_DIST += xfrout_messages.py
+EXTRA_DIST += zonemgr_messages.py
+EXTRA_DIST += cfgmgr_messages.py
+EXTRA_DIST += config_messages.py
+EXTRA_DIST += notify_out_messages.py
+EXTRA_DIST += libxfrin_messages.py
+
+CLEANFILES = __init__.pyc
+CLEANFILES += bind10_messages.pyc
+CLEANFILES += cmdctl_messages.pyc
+CLEANFILES += stats_messages.pyc
+CLEANFILES += stats_httpd_messages.pyc
+CLEANFILES += xfrin_messages.pyc
+CLEANFILES += xfrout_messages.pyc
+CLEANFILES += zonemgr_messages.pyc
+CLEANFILES += cfgmgr_messages.pyc
+CLEANFILES += config_messages.pyc
+CLEANFILES += notify_out_messages.pyc
+CLEANFILES += libxfrin_messages.pyc
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/README b/src/lib/python/isc/log_messages/README
new file mode 100644
index 0000000..c96f78c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/README
@@ -0,0 +1,68 @@
+This is a placeholder package for logging messages of various modules
+in the form of python scripts.  This package is expected to be installed
+somewhere like <top-install-dir>/python3.x/site-packages/isc/log_messages
+and each message script is expected to be imported as
+"isc.log_messages.some_module_messages".
+
+We also need to allow in-source test code to get access to the message
+scripts in the same manner.  That's why the package is stored in the
+directory that shares the same trailing part as the install directory,
+i.e., isc/log_messages.
+
+Furthermore, we need to support a build mode using a separate build
+tree (such as in the case with 'make distcheck').  In that case if an
+application (via a test script) imports "isc.log_messages.xxx", it
+would try to import the module under the source tree, where the
+generated message script doesn't exist.  So, in the source directory
+(i.e., here) we provide dummy scripts that subsequently import the
+same name of module under the "work" sub-package.  The caller
+application is assumed to have <top_builddir>/src/lib/python/isc/log_messages
+in its module search path (this is done by including
+$(COMMON_PYTHON_PATH) in the PYTHONPATH environment variable),
+which ensures the right directory is chosen.
+
+A python module or program that defines its own log messages needs to
+make sure that the setup described above is implemented.  It's a
+complicated process, but can generally be done by following a common
+pattern:
+
+1. Create the dummy script (see above) for the module and update
+   Makefile.am in this directory accordingly.  See (and use)
+   a helper shell script named gen-forwarder.sh.
+2. Update Makefile.am of the module that defines the log message.  The
+   following is a sample snippet of Makefile.am for a module named
+   "mymodule" (which is supposed to be generated from a file
+   "mymodule_messages.mes").  In many cases it should work simply by
+   replacing 'mymodule' with the actual module name.
+
+====================  begin Makefile.am additions ===================
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.pyc
+
+EXTRA_DIST = mymodule_messages.mes
+
+$(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py : mymodule_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/mymodule_messages.mes
+
+# This rule ensures mymodule_messages.py is (re)generated as a result of
+# 'make'.  If there's no other appropriate target, specify
+# mymodule_messages.py in BUILT_SOURCES.
+mymodule: <other source files> $(PYTHON_LOGMSGPKG_DIR)/work/mymodule_messages.py
+=====================  end Makefile.am additions ====================
+
+Notes:
+- "nodist_" prefix is important.  Without this, 'make distcheck' tries
+  to make _messages.py before actually starting the main build, which
+  would fail because the message compiler isn't built yet.
+- "pylogmessage" is a prefix for python scripts that define log
+  messages and are expected to be installed in the common isc/log_messages
+  directory.   It's intentionally named differently from the common
+  "python" prefix (as in python_PYTHON), because the latter may be
+  used for other scripts in the same Makefile.am file.
+- $(PYTHON_LOGMSGPKG_DIR) should be set to point to this directory (or
+  the corresponding build directory if it's different) by the
+  configure script.
diff --git a/src/lib/python/isc/log_messages/__init__.py b/src/lib/python/isc/log_messages/__init__.py
new file mode 100644
index 0000000..d222b8c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/__init__.py
@@ -0,0 +1,3 @@
+"""
+This is an in-source forwarder package redirecting to work/* scripts.
+"""
diff --git a/src/lib/python/isc/log_messages/bind10_messages.py b/src/lib/python/isc/log_messages/bind10_messages.py
new file mode 100644
index 0000000..68ce94c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/bind10_messages.py
@@ -0,0 +1 @@
+from work.bind10_messages import *
diff --git a/src/lib/python/isc/log_messages/cfgmgr_messages.py b/src/lib/python/isc/log_messages/cfgmgr_messages.py
new file mode 100644
index 0000000..5557100
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cfgmgr_messages.py
@@ -0,0 +1 @@
+from work.cfgmgr_messages import *
diff --git a/src/lib/python/isc/log_messages/cmdctl_messages.py b/src/lib/python/isc/log_messages/cmdctl_messages.py
new file mode 100644
index 0000000..7283d5a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/cmdctl_messages.py
@@ -0,0 +1 @@
+from work.cmdctl_messages import *
diff --git a/src/lib/python/isc/log_messages/config_messages.py b/src/lib/python/isc/log_messages/config_messages.py
new file mode 100644
index 0000000..c557975
--- /dev/null
+++ b/src/lib/python/isc/log_messages/config_messages.py
@@ -0,0 +1 @@
+from work.config_messages import *
diff --git a/src/lib/python/isc/log_messages/gen-forwarder.sh b/src/lib/python/isc/log_messages/gen-forwarder.sh
new file mode 100755
index 0000000..84c2450
--- /dev/null
+++ b/src/lib/python/isc/log_messages/gen-forwarder.sh
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+MODULE_NAME=$1
+if test -z $MODULE_NAME; then
+	echo 'Usage: gen-forwarder.sh module_name'
+	exit 1
+fi
+
+echo "from work.${MODULE_NAME}_messages import *" > ${MODULE_NAME}_messages.py
+echo "Forwarder python script is generated.  Make sure to perform:"
+echo "git add ${MODULE_NAME}_messages.py"
+echo "and add the following to Makefile.am:"
+echo "EXTRA_DIST += ${MODULE_NAME}_messages.py"
+echo "CLEANFILES += ${MODULE_NAME}_messages.pyc"
diff --git a/src/lib/python/isc/log_messages/libxfrin_messages.py b/src/lib/python/isc/log_messages/libxfrin_messages.py
new file mode 100644
index 0000000..74da329
--- /dev/null
+++ b/src/lib/python/isc/log_messages/libxfrin_messages.py
@@ -0,0 +1 @@
+from work.libxfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/notify_out_messages.py b/src/lib/python/isc/log_messages/notify_out_messages.py
new file mode 100644
index 0000000..6aa37ea
--- /dev/null
+++ b/src/lib/python/isc/log_messages/notify_out_messages.py
@@ -0,0 +1 @@
+from work.notify_out_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_httpd_messages.py b/src/lib/python/isc/log_messages/stats_httpd_messages.py
new file mode 100644
index 0000000..7782c34
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_httpd_messages.py
@@ -0,0 +1 @@
+from work.stats_httpd_messages import *
diff --git a/src/lib/python/isc/log_messages/stats_messages.py b/src/lib/python/isc/log_messages/stats_messages.py
new file mode 100644
index 0000000..1324cfc
--- /dev/null
+++ b/src/lib/python/isc/log_messages/stats_messages.py
@@ -0,0 +1 @@
+from work.stats_messages import *
diff --git a/src/lib/python/isc/log_messages/work/Makefile.am b/src/lib/python/isc/log_messages/work/Makefile.am
new file mode 100644
index 0000000..9bc5e0f
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/Makefile.am
@@ -0,0 +1,12 @@
+# __init__.py is generated in the builddir by the configure script (from
+# __init__.py.in) so that test scripts can refer to it when a separate
+# builddir is used.
+
+python_PYTHON = __init__.py
+
+pythondir = $(pyexecdir)/isc/log_messages/
+
+CLEANFILES = __init__.pyc
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/log_messages/work/__init__.py.in b/src/lib/python/isc/log_messages/work/__init__.py.in
new file mode 100644
index 0000000..991f10a
--- /dev/null
+++ b/src/lib/python/isc/log_messages/work/__init__.py.in
@@ -0,0 +1,3 @@
+"""
+This package is a placeholder for python scripts of log messages.
+"""
diff --git a/src/lib/python/isc/log_messages/xfrin_messages.py b/src/lib/python/isc/log_messages/xfrin_messages.py
new file mode 100644
index 0000000..b412519
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrin_messages.py
@@ -0,0 +1 @@
+from work.xfrin_messages import *
diff --git a/src/lib/python/isc/log_messages/xfrout_messages.py b/src/lib/python/isc/log_messages/xfrout_messages.py
new file mode 100644
index 0000000..2093d5c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/xfrout_messages.py
@@ -0,0 +1 @@
+from work.xfrout_messages import *
diff --git a/src/lib/python/isc/log_messages/zonemgr_messages.py b/src/lib/python/isc/log_messages/zonemgr_messages.py
new file mode 100644
index 0000000..b3afe9c
--- /dev/null
+++ b/src/lib/python/isc/log_messages/zonemgr_messages.py
@@ -0,0 +1 @@
+from work.zonemgr_messages import *
diff --git a/src/lib/python/isc/net/tests/Makefile.am b/src/lib/python/isc/net/tests/Makefile.am
index 3a04f17..dd94946 100644
--- a/src/lib/python/isc/net/tests/Makefile.am
+++ b/src/lib/python/isc/net/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/notify/Makefile.am b/src/lib/python/isc/notify/Makefile.am
index a23a1ff..c247ab8 100644
--- a/src/lib/python/isc/notify/Makefile.am
+++ b/src/lib/python/isc/notify/Makefile.am
@@ -1,21 +1,22 @@
 SUBDIRS = . tests
 
 python_PYTHON = __init__.py notify_out.py
-pyexec_DATA = $(top_builddir)/src/lib/python/notify_out_messages.py
-
 pythondir = $(pyexecdir)/isc/notify
 
-$(top_builddir)/src/lib/python/notify_out_messages.py: notify_out_messages.mes
-	$(top_builddir)/src/lib/log/compiler/message \
-		-p -d $(top_builddir)/src/lib/python \
-		$(top_srcdir)/src/lib/python/isc/notify/notify_out_messages.mes
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
 
 EXTRA_DIST = notify_out_messages.mes
 
-CLEANFILES =  $(top_builddir)/src/lib/python/notify_out_messages.pyc
-CLEANFILES += $(top_builddir)/src/lib/python/notify_out_messages.py
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.pyc
 
 CLEANDIRS = __pycache__
 
+$(PYTHON_LOGMSGPKG_DIR)/work/notify_out_messages.py : notify_out_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+	-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/notify_out_messages.mes
+
 clean-local:
 	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index f1e02ca..64a4b3e 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,9 +21,10 @@ import threading
 import time
 import errno
 from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
 from isc.net import addr
 import isc
-from notify_out_messages import *
+from isc.log_messages.notify_out_messages import *
 
 logger = isc.log.Logger("notify_out")
 
@@ -31,7 +32,7 @@ logger = isc.log.Logger("notify_out")
 # we can't import we should not start anyway, and logging an error
 # is a bad idea since the logging system is most likely not
 # initialized yet. see trac ticket #1103
-from pydnspp import *
+from isc.dns import *
 
 ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
 _MAX_NOTIFY_NUM = 30
@@ -51,6 +52,24 @@ _BAD_REPLY_PACKET = 5
 
 SOCK_DATA = b's'
 
+# borrowed from xfrin.py @ #1298.  We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+    """Helper function to format a zone name and class as a string of
+       the form '<name>/<class>'.
+       Parameters:
+       zone_name (isc.dns.Name) name to format
+       zone_class (isc.dns.RRClass) class to format
+    """
+    return zone_name.to_text() + '/' + str(zone_class)
+
+class NotifyOutDataSourceError(Exception):
+    """An exception raised when data source error happens within notify out.
+
+    This exception is expected to be caught within the notify_out module.
+
+    """
+    pass
+
 class ZoneNotifyInfo:
     '''This class keeps track of notify-out information for one zone.'''
 
@@ -123,16 +142,20 @@ class NotifyOut:
         self._nonblock_event = threading.Event()
 
     def _init_notify_out(self, datasrc_file):
-        '''Get all the zones name and its notify target's address
+        '''Get all the zones name and its notify target's address.
+
         TODO, currently the zones are got by going through the zone
         table in database. There should be a better way to get them
         and also the setting 'also_notify', and there should be one
-        mechanism to cover the changed datasrc.'''
+        mechanism to cover the changed datasrc.
+
+        '''
         self._db_file = datasrc_file
         for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
             zone_id = (zone_name, zone_class)
             self._notify_infos[zone_id] = ZoneNotifyInfo(zone_name, zone_class)
-            slaves = self._get_notify_slaves_from_ns(zone_name)
+            slaves = self._get_notify_slaves_from_ns(Name(zone_name),
+                                                     RRClass(zone_class))
             for item in slaves:
                 self._notify_infos[zone_id].notify_slaves.append((item, 53))
 
@@ -234,7 +257,7 @@ class NotifyOut:
     def _get_rdata_data(self, rr):
         return rr[7].strip()
 
-    def _get_notify_slaves_from_ns(self, zone_name):
+    def _get_notify_slaves_from_ns(self, zone_name, zone_class):
         '''Get all NS records, then remove the primary master from ns rrset,
         then use the name in NS record rdata part to get the a/aaaa records
         in the same zone. the targets listed in a/aaaa record rdata are treated
@@ -242,28 +265,52 @@ class NotifyOut:
         Note: this is the simplest way to get the address of slaves,
         but not correct, it can't handle the delegation slaves, or the CNAME
         and DNAME logic.
-        TODO. the function should be provided by one library.'''
-        ns_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'NS', self._db_file)
-        soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
-        ns_rr_name = []
-        for ns in ns_rrset:
-            ns_rr_name.append(self._get_rdata_data(ns))
-
-        if len(soa_rrset) > 0:
-            sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
-            if sname in ns_rr_name:
-                ns_rr_name.remove(sname)
-
-        addr_list = []
-        for rr_name in ns_rr_name:
-            a_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'A', self._db_file)
-            aaaa_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'AAAA', self._db_file)
-            for rr in a_rrset:
-                addr_list.append(self._get_rdata_data(rr))
-            for rr in aaaa_rrset:
-                addr_list.append(self._get_rdata_data(rr))
-
-        return addr_list
+        TODO. the function should be provided by one library.
+
+        '''
+        # Prepare data source client.  This should eventually be moved to
+        # an earlier stage of initialization and also support multiple
+        # data sources.
+        datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+        try:
+            result, finder = DataSourceClient('sqlite3',
+                                              datasrc_config).find_zone(
+                zone_name)
+        except isc.datasrc.Error as ex:
+            logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
+            return []
+        if result is not DataSourceClient.SUCCESS:
+            logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
+                         format_zone_str(zone_name, zone_class))
+            return []
+
+        result, ns_rrset = finder.find(zone_name, RRType.NS())
+        if result is not finder.SUCCESS or ns_rrset is None:
+            logger.warn(NOTIFY_OUT_ZONE_NO_NS,
+                        format_zone_str(zone_name, zone_class))
+            return []
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
+        if result is not finder.SUCCESS or soa_rrset is None or \
+                soa_rrset.get_rdata_count() != 1:
+            logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
+                        format_zone_str(zone_name, zone_class))
+            return []           # broken zone anyway, stop here.
+        soa_mname = Name(soa_rrset.get_rdata()[0].to_text().split(' ')[0])
+
+        addrs = []
+        for ns_rdata in ns_rrset.get_rdata():
+            ns_name = Name(ns_rdata.to_text())
+            if soa_mname == ns_name:
+                continue
+            result, rrset = finder.find(ns_name, RRType.A())
+            if result is finder.SUCCESS and rrset is not None:
+                addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+            result, rrset = finder.find(ns_name, RRType.AAAA())
+            if result is finder.SUCCESS and rrset is not None:
+                addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
+
+        return addrs
 
     def _prepare_select_info(self):
         '''
@@ -404,8 +451,9 @@ class NotifyOut:
                         self._nonblock_event.set()
 
     def _send_notify_message_udp(self, zone_notify_info, addrinfo):
-        msg, qid = self._create_notify_message(zone_notify_info.zone_name,
-                                               zone_notify_info.zone_class)
+        msg, qid = self._create_notify_message(
+            Name(zone_notify_info.zone_name),
+            RRClass(zone_notify_info.zone_class))
         render = MessageRenderer()
         render.set_length_limit(512)
         msg.to_wire(render)
@@ -426,17 +474,6 @@ class NotifyOut:
 
         return True
 
-    def _create_rrset_from_db_record(self, record, zone_class):
-        '''Create one rrset from one record of datasource, if the schema of record is changed,
-        This function should be updated first. TODO, the function is copied from xfrout, there
-        should be library for creating one rrset. '''
-        rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
-        rdata_ = Rdata(rrtype_, RRClass(zone_class), " ".join(record[sqlite3_ds.RR_RDATA_INDEX:]))
-        rrset_ = RRset(Name(record[sqlite3_ds.RR_NAME_INDEX]), RRClass(zone_class), \
-                       rrtype_, RRTTL( int(record[sqlite3_ds.RR_TTL_INDEX])))
-        rrset_.add_rdata(rdata_)
-        return rrset_
-
     def _create_notify_message(self, zone_name, zone_class):
         msg = Message(Message.RENDER)
         qid = random.randint(0, 0xFFFF)
@@ -444,14 +481,35 @@ class NotifyOut:
         msg.set_opcode(Opcode.NOTIFY())
         msg.set_rcode(Rcode.NOERROR())
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
-        msg.add_question(question)
-        # Add soa record to answer section
-        soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
-        rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+        msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
+                                                                 zone_class))
         return msg, qid
 
+    def _get_zone_soa(self, zone_name, zone_class):
+        # We create (and soon drop) the data source client here because
+        # clients should be thread specific.  We could let the main thread
+        # loop (_dispatcher) create and retain the client in order to avoid
+        # the overhead when we generalize the interface (and we may also
+        # revisit the design of notify_out more substantially anyway).
+        datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+        result, finder = DataSourceClient('sqlite3',
+                                          datasrc_config).find_zone(zone_name)
+        if result is not DataSourceClient.SUCCESS:
+            raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+                                           zone_name.to_text() + '/' +
+                                           zone_class.to_text() + ' not found')
+
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
+        if result is not finder.SUCCESS or soa_rrset is None or \
+                soa_rrset.get_rdata_count() != 1:
+            raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+                                           zone_name.to_text() + '/' +
+                                           zone_class.to_text() +
+                                           ' is broken: no valid SOA found')
+
+        return soa_rrset
+
     def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
         '''Parse the notify reply message.
         rcode will not checked here, If we get the response
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index f9de744..b77a60c 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -78,6 +78,27 @@ message, either in the message parser, or while trying to extract data
 from the parsed message. The error is printed, and notify_out will
 treat the response as a bad message, but this does point to a
 programming error, since all exceptions should have been caught
-explicitely. Please file a bug report. Since there was a response,
+explicitly. Please file a bug report. Since there was a response,
 no more notifies will be sent to this server for this notification
 event.
+
+% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1
+notify_out failed to get access to one of configured data sources.
+Detailed error is shown in the log message.  This can be either a
+configuration error or installation setup failure.
+
+% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found
+notify_out attempted to get slave information of a zone but the zone
+isn't found in the expected data source.  This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process.  So this means some critical inconsistency in the data
+source or software bug.
+
+% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  Notify message won't be sent to such a zone.
+
+% NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  Notify message won't
+be sent to such a zone.
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 1427d93..3af5991 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -1,12 +1,20 @@
 PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = notify_out_test.py
 EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST += testdata/test.sqlite3 testdata/brokentest.sqlite3
+# The rest of the files are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/example.net
+EXTRA_DIST += testdata/nons.example testdata/nosoa.example
+EXTRA_DIST += testdata/multisoa.example
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -18,7 +26,9 @@ if ENABLE_PYTHON_COVERAGE
 endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 83f6d1a..d64c203 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -19,9 +19,11 @@ import os
 import tempfile
 import time
 import socket
-from isc.datasrc import sqlite3_ds
 from isc.notify import notify_out, SOCK_DATA
 import isc.log
+from isc.dns import *
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 
 # our fake socket, where we can read and insert messages
 class MockSocket():
@@ -92,10 +94,8 @@ class TestZoneNotifyInfo(unittest.TestCase):
 
 class TestNotifyOut(unittest.TestCase):
     def setUp(self):
-        self._db_file = tempfile.NamedTemporaryFile(delete=False)
-        sqlite3_ds.load(self._db_file.name, 'example.net.', self._example_net_data_reader)
-        sqlite3_ds.load(self._db_file.name, 'example.com.', self._example_com_data_reader)
-        self._notify = notify_out.NotifyOut(self._db_file.name)
+        self._db_file = TESTDATA_SRCDIR + '/test.sqlite3'
+        self._notify = notify_out.NotifyOut(self._db_file)
         self._notify._notify_infos[('example.com.', 'IN')] = MockZoneNotifyInfo('example.com.', 'IN')
         self._notify._notify_infos[('example.com.', 'CH')] = MockZoneNotifyInfo('example.com.', 'CH')
         self._notify._notify_infos[('example.net.', 'IN')] = MockZoneNotifyInfo('example.net.', 'IN')
@@ -110,10 +110,6 @@ class TestNotifyOut(unittest.TestCase):
         com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
         com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
 
-    def tearDown(self):
-        self._db_file.close()
-        os.unlink(self._db_file.name)
-
     def test_send_notify(self):
         notify_out._MAX_NOTIFY_NUM = 2
 
@@ -309,39 +305,9 @@ class TestNotifyOut(unittest.TestCase):
         self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
         self.assertNotEqual(cur_tgt, example_net_info._notify_current)
 
-
-    def _example_net_data_reader(self):
-        zone_data = [
-        ('example.net.',         '1000',  'IN',  'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
-        ('example.net.',         '1000',  'IN',  'NS',  'a.dns.example.net.'),
-        ('example.net.',         '1000',  'IN',  'NS',  'b.dns.example.net.'),
-        ('example.net.',         '1000',  'IN',  'NS',  'c.dns.example.net.'),
-        ('a.dns.example.net.',   '1000',  'IN',  'A',    '1.1.1.1'),
-        ('a.dns.example.net.',   '1000',  'IN',  'AAAA', '2:2::2:2'),
-        ('b.dns.example.net.',   '1000',  'IN',  'A',    '3.3.3.3'),
-        ('b.dns.example.net.',   '1000',  'IN',  'AAAA', '4:4::4:4'),
-        ('b.dns.example.net.',   '1000',  'IN',  'AAAA', '5:5::5:5'),
-        ('c.dns.example.net.',   '1000',  'IN',  'A',    '6.6.6.6'),
-        ('c.dns.example.net.',   '1000',  'IN',  'A',    '7.7.7.7'),
-        ('c.dns.example.net.',   '1000',  'IN',  'AAAA', '8:8::8:8')]
-        for item in zone_data:
-            yield item
-
-    def _example_com_data_reader(self):
-        zone_data = [
-        ('example.com.',         '1000',  'IN',  'SOA', 'a.dns.example.com. mail.example.com. 1 1 1 1 1'),
-        ('example.com.',         '1000',  'IN',  'NS',  'a.dns.example.com.'),
-        ('example.com.',         '1000',  'IN',  'NS',  'b.dns.example.com.'),
-        ('example.com.',         '1000',  'IN',  'NS',  'c.dns.example.com.'),
-        ('a.dns.example.com.',   '1000',  'IN',  'A',    '1.1.1.1'),
-        ('b.dns.example.com.',   '1000',  'IN',  'A',    '3.3.3.3'),
-        ('b.dns.example.com.',   '1000',  'IN',  'AAAA', '4:4::4:4'),
-        ('b.dns.example.com.',   '1000',  'IN',  'AAAA', '5:5::5:5')]
-        for item in zone_data:
-            yield item
-
     def test_get_notify_slaves_from_ns(self):
-        records = self._notify._get_notify_slaves_from_ns('example.net.')
+        records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
+                                                          RRClass.IN())
         self.assertEqual(6, len(records))
         self.assertEqual('8:8::8:8', records[5])
         self.assertEqual('7.7.7.7', records[4])
@@ -350,14 +316,32 @@ class TestNotifyOut(unittest.TestCase):
         self.assertEqual('4:4::4:4', records[1])
         self.assertEqual('3.3.3.3', records[0])
 
-        records = self._notify._get_notify_slaves_from_ns('example.com.')
+        records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
+                                                          RRClass.IN())
         self.assertEqual(3, len(records))
         self.assertEqual('5:5::5:5', records[2])
         self.assertEqual('4:4::4:4', records[1])
         self.assertEqual('3.3.3.3', records[0])
 
+    def test_get_notify_slaves_from_ns_unusual(self):
+        self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nons.example'), RRClass.IN()))
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nosoa.example'), RRClass.IN()))
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('multisoa.example'), RRClass.IN()))
+
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nosuchzone.example'), RRClass.IN()))
+
+        # This will cause failure in getting access to the data source.
+        self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('example.com'), RRClass.IN()))
+
     def test_init_notify_out(self):
-        self._notify._init_notify_out(self._db_file.name)
+        self._notify._init_notify_out(self._db_file)
         self.assertListEqual([('3.3.3.3', 53), ('4:4::4:4', 53), ('5:5::5:5', 53)],
                              self._notify._notify_infos[('example.com.', 'IN')].notify_slaves)
 
@@ -417,6 +401,5 @@ class TestNotifyOut(unittest.TestCase):
 
 if __name__== "__main__":
     isc.log.init("bind10")
+    isc.log.resetUnitTestRootLogger()
     unittest.main()
-
-
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
new file mode 100644
index 0000000..61e766c
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/example.com b/src/lib/python/isc/notify/tests/testdata/example.com
new file mode 100644
index 0000000..5d59819
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.com
@@ -0,0 +1,10 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.com.         1000  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com.         1000  IN  NS  a.dns.example.com.
+example.com.         1000  IN  NS  b.dns.example.com.
+example.com.         1000  IN  NS  c.dns.example.com.
+a.dns.example.com.   1000  IN  A    1.1.1.1
+b.dns.example.com.   1000  IN  A    3.3.3.3
+b.dns.example.com.   1000  IN  AAAA 4:4::4:4
+b.dns.example.com.   1000  IN  AAAA 5:5::5:5
diff --git a/src/lib/python/isc/notify/tests/testdata/example.net b/src/lib/python/isc/notify/tests/testdata/example.net
new file mode 100644
index 0000000..001d2d9
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.net
@@ -0,0 +1,14 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.net.         1000  IN  SOA a.dns.example.net. mail.example.net. 1 1 1 1 1
+example.net.         1000  IN  NS  a.dns.example.net.
+example.net.         1000  IN  NS  b.dns.example.net.
+example.net.         1000  IN  NS  c.dns.example.net.
+a.dns.example.net.   1000  IN  A    1.1.1.1
+a.dns.example.net.   1000  IN  AAAA 2:2::2:2
+b.dns.example.net.   1000  IN  A    3.3.3.3
+b.dns.example.net.   1000  IN  AAAA 4:4::4:4
+b.dns.example.net.   1000  IN  AAAA 5:5::5:5
+c.dns.example.net.   1000  IN  A    6.6.6.6
+c.dns.example.net.   1000  IN  A    7.7.7.7
+c.dns.example.net.   1000  IN  AAAA 8:8::8:8
diff --git a/src/lib/python/isc/notify/tests/testdata/multisoa.example b/src/lib/python/isc/notify/tests/testdata/multisoa.example
new file mode 100644
index 0000000..eca2fbd
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/multisoa.example
@@ -0,0 +1,5 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+multisoa.example.         1000  IN  SOA a.dns.multisoa.example. mail.multisoa.example. 1 1 1 1 1
+multisoa.example.         1000  IN  SOA a.dns.multisoa.example. mail.multisoa.example. 2 2 2 2 2
+multisoa.example.         1000  IN  NS  a.dns.multisoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/nons.example b/src/lib/python/isc/notify/tests/testdata/nons.example
new file mode 100644
index 0000000..c1fc1b8
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nons.example
@@ -0,0 +1,3 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+nons.example.         1000  IN  SOA a.dns.nons.example. mail.nons.example. 1 1 1 1 1
diff --git a/src/lib/python/isc/notify/tests/testdata/nosoa.example b/src/lib/python/isc/notify/tests/testdata/nosoa.example
new file mode 100644
index 0000000..18e87e1
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nosoa.example
@@ -0,0 +1,7 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+;; (SOA has been removed)
+nosoa.example.         1000  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+nosoa.example.         1000  IN  NS  a.dns.nosoa.example.
+nosoa.example.         1000  IN  NS  b.dns.nosoa.example.
+nosoa.example.         1000  IN  NS  c.dns.nosoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..e3cadb0
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..5479d83 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py rrset_utils.py
 
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..7eac772
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+    '''Compare two RRsets, return True if equal, otherwise False
+
+    We provide this function as part of test utils as we have no direct rrset
+    comparison atm.  There's no accessor for sigs either (so this only checks
+    name, class, type, ttl, and rdata).
+    Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+    '''
+    return a.get_name() == b.get_name() and \
+           a.get_class() == b.get_class() and \
+           a.get_type() == b.get_type() and \
+           a.get_ttl() == b.get_ttl() and \
+           (a.get_type() == RRType.RRSIG() or
+            sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are short cut utilities to create an RRset of a specific
+# RR type with one RDATA.  Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+    return rrset
+
+def create_aaaa(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+    return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+    rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+    return rrset
+
+def create_cname(target='target.example.com', name=Name('example.com'),
+                 ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+    return rrset
+
+def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
+    '''Create an RR of a general type with an arbitrary length of RDATA
+
+    If the RR type isn't specified, type of 65300 will be used, which is
+    arbitrarily chosen from the IANA "Reserved for Private Usage" range.
+    The RDATA will be filled with specified length of all-0 data.
+
+    '''
+    rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
+    rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+                          str(rdlen) + ' ' + '00' * rdlen))
+    return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+
+    rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+    rdata_str = 'master.example.com. admin.example.com. ' + \
+        str(serial) + ' 3600 1800 2419200 7200'
+    rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+    return rrset
diff --git a/src/lib/python/isc/util/tests/Makefile.am b/src/lib/python/isc/util/tests/Makefile.am
index c3d35c2..3b882b4 100644
--- a/src/lib/python/isc/util/tests/Makefile.am
+++ b/src/lib/python/isc/util/tests/Makefile.am
@@ -6,7 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
-LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$$$(ENV_LIBRARY_PATH)
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -19,6 +19,6 @@ endif
 	for pytest in $(PYTESTS) ; do \
 	echo Running test: $$pytest ; \
 	$(LIBRARY_PATH_PLACEHOLDER) \
-	env PYTHONPATH=$(abs_top_srcdir)/src/lib/python:$(abs_top_builddir)/src/lib/python:$(abs_top_builddir)/src/lib/dns/python/.libs \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/xfrin/Makefile.am b/src/lib/python/isc/xfrin/Makefile.am
new file mode 100644
index 0000000..5804de6
--- /dev/null
+++ b/src/lib/python/isc/xfrin/Makefile.am
@@ -0,0 +1,23 @@
+SUBDIRS = . tests
+
+python_PYTHON = __init__.py diff.py
+BUILT_SOURCES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+nodist_pylogmessage_PYTHON = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+pylogmessagedir = $(pyexecdir)/isc/log_messages/
+
+EXTRA_DIST = libxfrin_messages.mes
+
+CLEANFILES = $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py
+CLEANFILES += $(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.pyc
+
+# Define rule to build logging source files from message file
+$(PYTHON_LOGMSGPKG_DIR)/work/libxfrin_messages.py: libxfrin_messages.mes
+	$(top_builddir)/src/lib/log/compiler/message \
+		-d $(PYTHON_LOGMSGPKG_DIR)/work -p $(srcdir)/libxfrin_messages.mes
+
+pythondir = $(pyexecdir)/isc/xfrin
+
+CLEANDIRS = __pycache__
+
+clean-local:
+	rm -rf $(CLEANDIRS)
diff --git a/src/lib/python/isc/xfrin/__init__.py b/src/lib/python/isc/xfrin/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
new file mode 100644
index 0000000..38b7f39
--- /dev/null
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -0,0 +1,249 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+This helps the XFR in process with accumulating parts of diff and applying
+it to the datasource.
+
+The name of the module is not yet fully decided. We might want to move it
+under isc.datasrc or somewhere else, because we might want to reuse it with
+future DDNS process. But until then, it lives here.
+"""
+
+import isc.dns
+import isc.log
+from isc.log_messages.libxfrin_messages import *
+
+class NoSuchZone(Exception):
+    """
+    This is raised if a diff for non-existant zone is being created.
+    """
+    pass
+
+"""
+This is the amount of changes we accumulate before calling Diff.apply
+automatically.
+
+The number 100 is just taken from BIND 9. We don't know the rationale
+for exactly this amount, but we think it is just some randomly chosen
+number.
+"""
+# If changing this, modify the tests accordingly as well.
+DIFF_APPLY_TRESHOLD = 100
+
+logger = isc.log.Logger('libxfrin')
+
+class Diff:
+    """
+    The class represents a diff against current state of datasource on
+    one zone. The usual way of working with it is creating it, then putting
+    a bunch of changes in and committing at the end.
+
+    If you change your mind, you can just stop using the object without
+    really committing it. In that case no changes will happen in the data
+    source.
+
+    The class works as a kind of a buffer as well, it does not direct
+    the changes to underlying data source right away, but keeps them for
+    a while.
+    """
+    def __init__(self, ds_client, zone, replace=False, journaling=False):
+        """
+        Initializes the diff to a ready state. It checks the zone exists
+        in the datasource and if not, NoSuchZone is raised. This also creates
+        a transaction in the data source.
+
+        The ds_client is the datasource client containing the zone. Zone is
+        isc.dns.Name object representing the name of the zone (its apex).
+        If replace is True, the content of the whole zone is wiped out before
+        applying the diff.
+
+        If journaling is True, the history of subsequent updates will be
+        recorded as well as the updates themselves as long as the underlying
+        data source supports journaling.  If the data source allows
+        incoming updates but does not support journaling, the Diff object
+        will still continue applying the diffs with journaling disabled.
+
+        You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
+        exceptions.
+        """
+        try:
+            self.__updater = ds_client.get_updater(zone, replace, journaling)
+        except isc.datasrc.NotImplemented as ex:
+            if not journaling:
+                raise ex
+            self.__updater = ds_client.get_updater(zone, replace, False)
+            logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
+        if self.__updater is None:
+            # The no such zone case
+            raise NoSuchZone("Zone " + str(zone) +
+                             " does not exist in the data source " +
+                             str(ds_client))
+        self.__buffer = []
+
+    def __check_commited(self):
+        """
+        This checks if the diff is already committed or broken. If it is, it
+        raises ValueError. This check is for methods that need to work only on
+        yet uncommitted diffs.
+        """
+        if self.__updater is None:
+            raise ValueError("The diff is already commited or it has raised " +
+                             "an exception, you come late")
+
+    def __data_common(self, rr, operation):
+        """
+        Schedules an operation with rr.
+
+        It does all the real work of add_data and delete_data, including
+        all checks.
+        """
+        self.__check_commited()
+        if rr.get_rdata_count() != 1:
+            raise ValueError('The rrset must contain exactly 1 Rdata, but ' +
+                             'it holds ' + str(rr.get_rdata_count()))
+        if rr.get_class() != self.__updater.get_class():
+            raise ValueError("The rrset's class " + str(rr.get_class()) +
+                             " does not match updater's " +
+                             str(self.__updater.get_class()))
+        self.__buffer.append((operation, rr))
+        if len(self.__buffer) >= DIFF_APPLY_TRESHOLD:
+            # Time to auto-apply, so the data don't accumulate too much
+            self.apply()
+
+    def add_data(self, rr):
+        """
+        Schedules addition of an RR into the zone in this diff.
+
+        The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+        raises the ValueError exception.
+
+        The rr class must match the one of the datasource client. If
+        it does not, ValueError is raised.
+        """
+        self.__data_common(rr, 'add')
+
+    def delete_data(self, rr):
+        """
+        Schedules deleting an RR from the zone in this diff.
+
+        The rr is of isc.dns.RRset type and it must contain only one RR.
+        If this is not the case or if the diff was already committed, this
+        raises the ValueError exception.
+
+        The rr class must match the one of the datasource client. If
+        it does not, ValueError is raised.
+        """
+        self.__data_common(rr, 'delete')
+
+    def compact(self):
+        """
+        Tries to compact the operations in buffer a little by putting some of
+        the operations together, forming RRsets with more than one RR.
+
+        This is called by apply before putting the data into the datasource.
+        You may, but do not have to, call this manually.
+
+        Currently it merges consecutive same operations on the same
+        domain/type. We could do more fancy things, like sorting by the domain
+        and do more merging, but such diffs should be rare in practice anyway,
+        so we don't bother and do it this simple way.
+        """
+        buf = []
+        for (op, rrset) in self.__buffer:
+            old = buf[-1][1] if len(buf) > 0 else None
+            if old is None or op != buf[-1][0] or \
+                rrset.get_name() != old.get_name() or \
+                rrset.get_type() != old.get_type():
+                buf.append((op, isc.dns.RRset(rrset.get_name(),
+                                              rrset.get_class(),
+                                              rrset.get_type(),
+                                              rrset.get_ttl())))
+            if rrset.get_ttl() != buf[-1][1].get_ttl():
+                logger.warn(LIBXFRIN_DIFFERENT_TTL, rrset.get_ttl(),
+                            buf[-1][1].get_ttl())
+            for rdatum in rrset.get_rdata():
+                buf[-1][1].add_rdata(rdatum)
+        self.__buffer = buf
+
+    def apply(self):
+        """
+        Push the buffered changes inside this diff down into the data source.
+        This does not stop you from adding more changes later through this
+        diff and it does not close the datasource transaction, so the changes
+        will not be shown to others yet. It just means the internal memory
+        buffer is flushed.
+
+        This is called from time to time automatically, but you can call it
+        manually if you really want to.
+
+        This raises ValueError if the diff was already committed.
+
+        It also can raise isc.datasrc.Error. If that happens, you should stop
+        using this object and abort the modification.
+        """
+        self.__check_commited()
+        # First, compact the data
+        self.compact()
+        try:
+            # Then pass the data inside the data source
+            for (operation, rrset) in self.__buffer:
+                if operation == 'add':
+                    self.__updater.add_rrset(rrset)
+                elif operation == 'delete':
+                    self.__updater.delete_rrset(rrset)
+                else:
+                    raise ValueError('Unknown operation ' + operation)
+            # As everything is already in, drop the buffer
+        except:
+            # If there's a problem, we can't continue.
+            self.__updater = None
+            raise
+
+        self.__buffer = []
+
+    def commit(self):
+        """
+        Writes all the changes into the data source and makes them visible.
+        This closes the diff, you may not use it any more. If you try to use
+        it, you'll get ValueError.
+
+        This might raise isc.datasrc.Error.
+        """
+        self.__check_commited()
+        # Push the data inside the data source
+        self.apply()
+        # Make sure they are visible.
+        try:
+            self.__updater.commit()
+        finally:
+            # Remove the updater. That will free some resources for one, but
+            # also mark this object as already committed, so we can check.
+
+            # We delete it even in case the commit failed, as that makes us
+            # unusable.
+            self.__updater = None
+
+    def get_buffer(self):
+        """
+        Returns the current buffer of changes not yet passed into the data
+        source. It is in a form like [('add', rrset), ('delete', rrset),
+        ('delete', rrset), ...].
+
+        Probably useful only for testing and introspection purposes. Don't
+        modify the list.
+        """
+        return self.__buffer
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
new file mode 100644
index 0000000..203e31f
--- /dev/null
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -0,0 +1,31 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# No namespace declaration - these constants go in the global namespace
+# of the libxfrin_messages python module.
+
+% LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.
+The xfrin module received an update containing multiple rdata changes for the
+same RRset. But the TTLs of these don't match each other. As we combine them
+together, the later one gets overwritten to the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
diff --git a/src/lib/python/isc/xfrin/tests/Makefile.am b/src/lib/python/isc/xfrin/tests/Makefile.am
new file mode 100644
index 0000000..416d62b
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/Makefile.am
@@ -0,0 +1,24 @@
+PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
+PYTESTS = diff_tests.py
+EXTRA_DIST = $(PYTESTS)
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+LIBRARY_PATH_PLACEHOLDER =
+if SET_ENV_LIBRARY_PATH
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+endif
+
+# test using command-line arguments, so use check-local target instead of TESTS
+check-local:
+if ENABLE_PYTHON_COVERAGE
+	touch $(abs_top_srcdir)/.coverage
+	rm -f .coverage
+	${LN_S} $(abs_top_srcdir)/.coverage .coverage
+endif
+	for pytest in $(PYTESTS) ; do \
+	echo Running test: $$pytest ; \
+	$(LIBRARY_PATH_PLACEHOLDER) \
+	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
+	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
+	done
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
new file mode 100644
index 0000000..9944404
--- /dev/null
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -0,0 +1,466 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import isc.log
+import unittest
+import isc.datasrc
+from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
+from isc.xfrin.diff import Diff, NoSuchZone
+
+class TestError(Exception):
+    """
+    Just to have something to be raised during the tests.
+    Not used outside.
+    """
+    pass
+
+class DiffTest(unittest.TestCase):
+    """
+    Tests for the isc.xfrin.diff.Diff class.
+
+    It also plays role of a data source and an updater, so it can manipulate
+    some test variables while being called.
+    """
+    def setUp(self):
+        """
+        This sets internal variables so we can see nothing was called yet.
+
+        It also creates some variables used in multiple tests.
+        """
+        # Track what was called already
+        self.__updater_requested = False
+        self.__compact_called = False
+        self.__data_operations = []
+        self.__apply_called = False
+        self.__commit_called = False
+        self.__broken_called = False
+        self.__warn_called = False
+        self.__should_replace = False
+        # Some common values
+        self.__rrclass = RRClass.IN()
+        self.__type = RRType.A()
+        self.__ttl = RRTTL(3600)
+        # And RRsets
+        # Create two valid rrsets
+        self.__rrset1 = RRset(Name('a.example.org.'), self.__rrclass,
+                              self.__type, self.__ttl)
+        self.__rdata = Rdata(self.__type, self.__rrclass, '192.0.2.1')
+        self.__rrset1.add_rdata(self.__rdata)
+        self.__rrset2 = RRset(Name('b.example.org.'), self.__rrclass,
+                              self.__type, self.__ttl)
+        self.__rrset2.add_rdata(self.__rdata)
+        # And two invalid
+        self.__rrset_empty = RRset(Name('empty.example.org.'), self.__rrclass,
+                                   self.__type, self.__ttl)
+        self.__rrset_multi = RRset(Name('multi.example.org.'), self.__rrclass,
+                                   self.__type, self.__ttl)
+        self.__rrset_multi.add_rdata(self.__rdata)
+        self.__rrset_multi.add_rdata(Rdata(self.__type, self.__rrclass,
+                                           '192.0.2.2'))
+
+    def __mock_compact(self):
+        """
+        This can be put into the diff to hook into its compact method and see
+        if it gets called.
+        """
+        self.__compact_called = True
+
+    def __mock_apply(self):
+        """
+        This can be put into the diff to hook into its apply method and see
+        it gets called.
+        """
+        self.__apply_called = True
+
+    def __broken_operation(self, *args):
+        """
+        This can be used whenever an operation should fail. It raises TestError.
+        It should take whatever amount of parameters needed, so it can be put
+        quite anywhere.
+        """
+        self.__broken_called = True
+        raise TestError("Test error")
+
+    def warn(self, *args):
+        """
+        This is for checking the warn function was called, we replace the logger
+        in the tested module.
+        """
+        self.__warn_called = True
+
+    def commit(self):
+        """
+        This is part of pretending to be a zone updater. This notes the commit
+        was called.
+        """
+        self.__commit_called = True
+
+    def add_rrset(self, rrset):
+        """
+        This one is part of pretending to be a zone updater. It writes down
+        addition of an rrset was requested.
+        """
+        self.__data_operations.append(('add', rrset))
+
+    def delete_rrset(self, rrset):
+        """
+        This one is part of pretending to be a zone updater. It writes down
+        removal of an rrset was requested.
+        """
+        self.__data_operations.append(('delete', rrset))
+
+    def get_class(self):
+        """
+        This one is part of pretending to be a zone updater. It returns
+        the IN class.
+        """
+        return self.__rrclass
+
+    def get_updater(self, zone_name, replace, journaling=False):
+        """
+        This one pretends this is the data source client and serves
+        getting an updater.
+
+        If zone_name is 'none.example.org.', it returns None, otherwise
+        it returns self.
+        """
+        # The diff should not delete the old data.
+        self.assertEqual(self.__should_replace, replace)
+        self.__updater_requested = True
+        if zone_name == Name('none.example.org.'):
+            # Pretend this zone doesn't exist
+            return None
+
+        # If journaling is enabled, record the fact; for a special zone
+        # pretend that we don't support journaling.
+        if journaling:
+            if zone_name == Name('nodiff.example.org'):
+                raise isc.datasrc.NotImplemented('journaling not supported')
+            self.__journaling_enabled = True
+        else:
+            self.__journaling_enabled = False
+
+        return self
+
+    def test_create(self):
+        """
+        This tests the case when the diff is successfully created. It just
+        checks that it does not throw and gets the updater.
+        """
+        diff = Diff(self, Name('example.org.'))
+        self.assertTrue(self.__updater_requested)
+        self.assertEqual([], diff.get_buffer())
+        # By default journaling is disabled
+        self.assertFalse(self.__journaling_enabled)
+
+    def test_create_nonexist(self):
+        """
+        Try to create a diff on a zone that doesn't exist. This should
+        raise a correct exception.
+        """
+        self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
+        self.assertTrue(self.__updater_requested)
+
+    def test_create_withjournal(self):
+        Diff(self, Name('example.org'), False, True)
+        self.assertTrue(self.__journaling_enabled)
+
+    def test_create_nojournal(self):
+        Diff(self, Name('nodiff.example.org'), False, True)
+        self.assertFalse(self.__journaling_enabled)
+
+    def __data_common(self, diff, method, operation):
+        """
+        Common part of the tests for test_add and test_delete.
+        """
+        # Try putting there the bad data first
+        self.assertRaises(ValueError, method, self.__rrset_empty)
+        self.assertRaises(ValueError, method, self.__rrset_multi)
+        # They were not added
+        self.assertEqual([], diff.get_buffer())
+        # Put some proper data into the diff
+        method(self.__rrset1)
+        method(self.__rrset2)
+        dlist = [(operation, self.__rrset1), (operation, self.__rrset2)]
+        self.assertEqual(dlist, diff.get_buffer())
+        # Check the data are not destroyed by raising an exception because of
+        # bad data
+        self.assertRaises(ValueError, method, self.__rrset_empty)
+        self.assertEqual(dlist, diff.get_buffer())
+
+    def test_add(self):
+        """
+        Try to add few items into the diff and see they are stored in there.
+
+        Also try passing an rrset that has a different number of RRs than 1.
+        """
+        diff = Diff(self, Name('example.org.'))
+        self.__data_common(diff, diff.add_data, 'add')
+
+    def test_delete(self):
+        """
+        Try scheduling removal of few items into the diff and see they are
+        stored in there.
+
+        Also try passing an rrset that has different amount of RRs than 1.
+        """
+        diff = Diff(self, Name('example.org.'))
+        self.__data_common(diff, diff.delete_data, 'delete')
+
+    def test_apply(self):
+        """
+        Schedule few additions and check the apply works by passing the
+        data into the updater.
+        """
+        # Prepare the diff
+        diff = Diff(self, Name('example.org.'))
+        diff.add_data(self.__rrset1)
+        diff.delete_data(self.__rrset2)
+        dlist = [('add', self.__rrset1), ('delete', self.__rrset2)]
+        self.assertEqual(dlist, diff.get_buffer())
+        # Do the apply, hook the compact method
+        diff.compact = self.__mock_compact
+        diff.apply()
+        # It should call the compact
+        self.assertTrue(self.__compact_called)
+        # And pass the data. Our local history of what happened is the same
+        # format, so we can check the same way
+        self.assertEqual(dlist, self.__data_operations)
+        # And the buffer in diff should become empty, as everything
+        # got inside.
+        self.assertEqual([], diff.get_buffer())
+
+    def test_commit(self):
+        """
+        If we call a commit, it should first apply whatever changes are
+        left (we hook into that instead of checking the effect) and then
+        the commit on the updater should have been called.
+
+        Then we check it raises value error for whatever operation we try.
+        """
+        diff = Diff(self, Name('example.org.'))
+        diff.add_data(self.__rrset1)
+        orig_apply = diff.apply
+        diff.apply = self.__mock_apply
+        diff.commit()
+        self.assertTrue(self.__apply_called)
+        self.assertTrue(self.__commit_called)
+        # The data should be handled by apply which we replaced.
+        self.assertEqual([], self.__data_operations)
+        # Now check all range of other methods raise ValueError
+        self.assertRaises(ValueError, diff.commit)
+        self.assertRaises(ValueError, diff.add_data, self.__rrset2)
+        self.assertRaises(ValueError, diff.delete_data, self.__rrset1)
+        diff.apply = orig_apply
+        self.assertRaises(ValueError, diff.apply)
+        # This one does not state it should raise, so check it doesn't
+        # But it is NOP in this situation anyway
+        diff.compact()
+
+    def test_autoapply(self):
+        """
+        Test the apply is called all by itself after 100 tasks are added.
+        """
+        diff = Diff(self, Name('example.org.'))
+        # A method to check the apply is called _after_ the 100th element
+        # is added. We don't use it anywhere else, so we define it locally
+        # as lambda function
+        def check():
+            self.assertEqual(100, len(diff.get_buffer()))
+            self.__mock_apply()
+        orig_apply = diff.apply
+        diff.apply = check
+        # If we put 99, nothing happens yet
+        for i in range(0, 99):
+            diff.add_data(self.__rrset1)
+        expected = [('add', self.__rrset1)] * 99
+        self.assertEqual(expected, diff.get_buffer())
+        self.assertFalse(self.__apply_called)
+        # Now we push the 100th and it should call the apply method
+        # This will _not_ flush the data yet, as we replaced the method.
+        # It, however, would in the real life.
+        diff.add_data(self.__rrset1)
+        # Now the apply method (which is replaced by our check) should
+        # have been called. If it wasn't, this is false. If it was, but
+        # still with 99 elements, the check would complain
+        self.assertTrue(self.__apply_called)
+        # Reset the buffer by calling the original apply.
+        orig_apply()
+        self.assertEqual([], diff.get_buffer())
+        # Similar with delete
+        self.__apply_called = False
+        for i in range(0, 99):
+            diff.delete_data(self.__rrset2)
+        expected = [('delete', self.__rrset2)] * 99
+        self.assertEqual(expected, diff.get_buffer())
+        self.assertFalse(self.__apply_called)
+        diff.delete_data(self.__rrset2)
+        self.assertTrue(self.__apply_called)
+
+    def test_compact(self):
+        """
+        Test the compaction works as expected, e.g. it compacts only consecutive
+        changes of the same operation and on the same domain/type.
+
+        The test case checks that it does merge them, but also puts some
+        different operations "in the middle", changes the type and name and
+        places the same kind of change further away of each other to see they
+        are not merged in that case.
+        """
+        diff = Diff(self, Name('example.org.'))
+        # Check we can do a compact on empty data, it shouldn't break
+        diff.compact()
+        self.assertEqual([], diff.get_buffer())
+        # This data is the way it should look like after the compact
+        # ('operation', 'domain.prefix', 'type', ['rdata', 'rdata'])
+        # The notes say why each of the consecutive entries can't be merged
+        data = [
+            ('add', 'a', 'A', ['192.0.2.1', '192.0.2.2']),
+            # Different type.
+            ('add', 'a', 'AAAA', ['2001:db8::1', '2001:db8::2']),
+            # Different operation
+            ('delete', 'a', 'AAAA', ['2001:db8::3']),
+            # Different domain
+            ('delete', 'b', 'AAAA', ['2001:db8::4']),
+            # This does not get merged with the first, even if logically
+            # possible. We just don't do this.
+            ('add', 'a', 'A', ['192.0.2.3'])
+            ]
+        # Now, fill the data into the diff, in a "flat" way, one by one
+        for (op, nprefix, rrtype, rdata) in data:
+            name = Name(nprefix + '.example.org.')
+            rrtype_obj = RRType(rrtype)
+            for rdatum in rdata:
+                rrset = RRset(name, self.__rrclass, rrtype_obj, self.__ttl)
+                rrset.add_rdata(Rdata(rrtype_obj, self.__rrclass, rdatum))
+                if op == 'add':
+                    diff.add_data(rrset)
+                else:
+                    diff.delete_data(rrset)
+        # Compact it
+        diff.compact()
+        # Now check they got compacted. They should be in the same order as
+        # pushed inside. So it should be the same as data modulo being in
+        # the rrsets and isc.dns objects.
+        def check():
+            buf = diff.get_buffer()
+            self.assertEqual(len(data), len(buf))
+            for (expected, received) in zip(data, buf):
+                (eop, ename, etype, edata) = expected
+                (rop, rrrset) = received
+                self.assertEqual(eop, rop)
+                ename_obj = Name(ename + '.example.org.')
+                self.assertEqual(ename_obj, rrrset.get_name())
+                # We check on names to make sure they are printed nicely
+                self.assertEqual(etype, str(rrrset.get_type()))
+                rdata = rrrset.get_rdata()
+                self.assertEqual(len(edata), len(rdata))
+                # It should also preserve the order
+                for (edatum, rdatum) in zip(edata, rdata):
+                    self.assertEqual(edatum, str(rdatum))
+        check()
+        # Try another compact does nothing, but survives
+        diff.compact()
+        check()
+
+    def test_wrong_class(self):
+        """
+        Test a wrong class of rrset is rejected.
+        """
+        diff = Diff(self, Name('example.org.'))
+        rrset = RRset(Name('a.example.org.'), RRClass.CH(), RRType.NS(),
+                      self.__ttl)
+        rrset.add_rdata(Rdata(RRType.NS(), RRClass.CH(), 'ns.example.org.'))
+        self.assertRaises(ValueError, diff.add_data, rrset)
+        self.assertRaises(ValueError, diff.delete_data, rrset)
+
+    def __do_raise_test(self):
+        """
+        Do a raise test. Expects that one of the operations is exchanged for
+        broken version.
+        """
+        diff = Diff(self, Name('example.org.'))
+        diff.add_data(self.__rrset1)
+        diff.delete_data(self.__rrset2)
+        self.assertRaises(TestError, diff.commit)
+        self.assertTrue(self.__broken_called)
+        self.assertRaises(ValueError, diff.add_data, self.__rrset1)
+        self.assertRaises(ValueError, diff.delete_data, self.__rrset2)
+        self.assertRaises(ValueError, diff.commit)
+        self.assertRaises(ValueError, diff.apply)
+
+    def test_raise_add(self):
+        """
+        Test the exception from add_rrset is propagated and the diff can't be
+        used afterwards.
+        """
+        self.add_rrset = self.__broken_operation
+        self.__do_raise_test()
+
+    def test_raise_delete(self):
+        """
+        Test the exception from delete_rrset is propagated and the diff can't be
+        used afterwards.
+        """
+        self.delete_rrset = self.__broken_operation
+        self.__do_raise_test()
+
+    def test_raise_commit(self):
+        """
+        Test the exception from updater's commit gets propagated and it can't be
+        used afterwards.
+        """
+        self.commit = self.__broken_operation
+        self.__do_raise_test()
+
+    def test_ttl(self):
+        """
+        Test the TTL handling. A warn function should have been called if they
+        differ, but that's all, it should not crash or raise.
+        """
+        orig_logger = isc.xfrin.diff.logger
+        try:
+            isc.xfrin.diff.logger = self
+            diff = Diff(self, Name('example.org.'))
+            diff.add_data(self.__rrset1)
+            rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+                                  self.__type, RRTTL(120))
+            rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.2'))
+            diff.add_data(rrset2)
+            rrset2 = RRset(Name('a.example.org.'), self.__rrclass,
+                                  self.__type, RRTTL(6000))
+            rrset2.add_rdata(Rdata(self.__type, self.__rrclass, '192.10.2.3'))
+            diff.add_data(rrset2)
+            # They should get compacted together and complain.
+            diff.compact()
+            self.assertEqual(1, len(diff.get_buffer()))
+            # The TTL stays on the first value, no matter if smaller or bigger
+            # ones come later.
+            self.assertEqual(self.__ttl, diff.get_buffer()[0][1].get_ttl())
+            self.assertTrue(self.__warn_called)
+        finally:
+            isc.xfrin.diff.logger = orig_logger
+
+    def test_relpace(self):
+        """
+        Test that when we want to replace the whole zone, it is propagated.
+        """
+        self.__should_replace = True
+        diff = Diff(self, "example.org.", True)
+        self.assertTrue(self.__updater_requested)
+
+if __name__ == "__main__":
+    isc.log.init("bind10")
+    unittest.main()
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index d692dc1..0d3fb4c 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -84,6 +84,7 @@ questionText(const isc::dns::Question& question) {
 /// It is not public function, therefore it's not in header. But it's not
 /// in anonymous namespace, so we can call it from unittests.
 /// \param name The name we want to delegate to.
+/// \param rrclass The class.
 /// \param cache The place too look for known delegations.
 std::string
 deepestDelegation(Name name, RRClass rrclass,
diff --git a/src/lib/resolve/recursive_query.h b/src/lib/resolve/recursive_query.h
index b9fb80d..9af2d72 100644
--- a/src/lib/resolve/recursive_query.h
+++ b/src/lib/resolve/recursive_query.h
@@ -38,7 +38,7 @@ public:
     ///
     /// Adds a round-trip time to the internal vector of times.
     ///
-    /// \param RTT to record.
+    /// \param rtt RTT to record.
     void addRtt(uint32_t rtt) {
         rtt_.push_back(rtt);
     }
@@ -73,6 +73,10 @@ public:
     ///
     /// \param dns_service The DNS Service to perform the recursive
     ///        query on.
+    /// \param nsas Nameserver address store, used to hold information about zone
+    ///        nameservers.
+    /// \param cache Resolver cache object, used to hold information about retrieved
+    ///        records.
     /// \param upstream Addresses and ports of the upstream servers
     ///        to forward queries to.
     /// \param upstream_root Addresses and ports of the root servers
@@ -133,8 +137,10 @@ public:
     /// object.
     ///
     /// \param question The question being answered <qname/qclass/qtype>
-    /// \param answer_message An output Message into which the final response will be copied
-    /// \param buffer An output buffer into which the intermediate responses will be copied
+    /// \param answer_message An output Message into which the final response will
+    ///        be copied.
+    /// \param buffer An output buffer into which the intermediate responses will
+    ///        be copied.
     /// \param server A pointer to the \c DNSServer object handling the client
     void resolve(const isc::dns::Question& question,
                  isc::dns::MessagePtr answer_message,
@@ -147,6 +153,10 @@ public:
     ///  function resolve().
     ///
     /// \param query_message the full query got from client.
+    /// \param answer_message the full answer received from other server.
+    /// \param buffer Output buffer into which the responses will be copied.
+    /// \param server Server object that handles receipt and processing of the
+    ///               received messages.
     /// \param callback callback object
     void forward(isc::dns::ConstMessagePtr query_message,
                  isc::dns::MessagePtr answer_message,
diff --git a/src/lib/resolve/resolve.h b/src/lib/resolve/resolve.h
index 550b620..0a588e2 100644
--- a/src/lib/resolve/resolve.h
+++ b/src/lib/resolve/resolve.h
@@ -37,7 +37,6 @@ namespace resolve {
 /// section), you can simply use this to create an error response.
 ///
 /// \param answer_message The message to clear and place the error in
-/// \param question The question to add to the
 /// \param error_code The error Rcode
 void makeErrorMessage(isc::dns::MessagePtr answer_message,
                       const isc::dns::Rcode& error_code);
diff --git a/src/lib/resolve/resolve_log.h b/src/lib/resolve/resolve_log.h
index 1f2869e..828b9d3 100644
--- a/src/lib/resolve/resolve_log.h
+++ b/src/lib/resolve/resolve_log.h
@@ -27,17 +27,17 @@ namespace resolve {
 /// Note that higher numbers equate to more verbose (and detailed) output.
 
 // The first level traces normal operations
-const int RESLIB_DBG_TRACE = 10;
+const int RESLIB_DBG_TRACE = DBGLVL_TRACE_BASIC;
 
 // The next level extends the normal operations and records the results of the
 // lookups.
-const int RESLIB_DBG_RESULTS = 20;
+const int RESLIB_DBG_RESULTS = DBGLVL_TRACE_BASIC_DATA;
 
 // Report cache lookups and results
-const int RESLIB_DBG_CACHE = 40;
+const int RESLIB_DBG_CACHE = DBGLVL_TRACE_DETAIL_DATA;
 
 // Indicate when callbacks are called
-const int RESLIB_DBG_CB = 50;
+const int RESLIB_DBG_CB = DBGLVL_TRACE_DETAIL_DATA + 10;
 
 
 /// \brief Resolver Library Logger
diff --git a/src/lib/resolve/tests/Makefile.am b/src/lib/resolve/tests/Makefile.am
index ee311a6..cf05d9b 100644
--- a/src/lib/resolve/tests/Makefile.am
+++ b/src/lib/resolve/tests/Makefile.am
@@ -31,6 +31,7 @@ run_unittests_LDADD +=  $(top_builddir)/src/lib/asiolink/libasiolink.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/asiodns/libasiodns.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/dns/libdns++.la
+run_unittests_LDADD +=  $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
 run_unittests_LDADD +=  $(top_builddir)/src/lib/exceptions/libexceptions.la
diff --git a/src/lib/server_common/client.h b/src/lib/server_common/client.h
index 1c5928a..8cafb1e 100644
--- a/src/lib/server_common/client.h
+++ b/src/lib/server_common/client.h
@@ -140,7 +140,7 @@ private:
 ///
 /// \param os A \c std::ostream object on which the insertion operation is
 /// performed.
-/// \param edns A reference to an \c Client object output by the operation.
+/// \param client A reference to a \c Client object output by the operation.
 /// \return A reference to the same \c std::ostream object referenced by
 /// parameter \c os after the insertion operation.
 std::ostream& operator<<(std::ostream& os, const Client& client);
diff --git a/src/lib/server_common/logger.h b/src/lib/server_common/logger.h
index cfca1f3..80bc81d 100644
--- a/src/lib/server_common/logger.h
+++ b/src/lib/server_common/logger.h
@@ -18,7 +18,7 @@
 #include <log/macros.h>
 #include <server_common/server_common_messages.h>
 
-/// \file logger.h
+/// \file server_common/logger.h
 /// \brief Server Common library global logger
 ///
 /// This holds the logger for the server common library. It is a private header
@@ -31,12 +31,11 @@ namespace server_common {
 /// \brief The logger for this library
 extern isc::log::Logger logger;
 
-enum {
-    /// \brief Trace basic operations
-    DBG_TRACE_BASIC = 10,
-    /// \brief Print also values used
-    DBG_TRACE_VALUES = 40
-};
+/// \brief Trace basic operations
+const int DBG_TRACE_BASIC = DBGLVL_TRACE_BASIC;
+
+/// \brief Print also values used
+const int DBG_TRACE_VALUES = DBGLVL_TRACE_BASIC_DATA;
 
 }
 }
diff --git a/src/lib/testutils/Makefile.am b/src/lib/testutils/Makefile.am
index ae5c6da..a511d24 100644
--- a/src/lib/testutils/Makefile.am
+++ b/src/lib/testutils/Makefile.am
@@ -5,7 +5,7 @@ AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CXXFLAGS=$(B10_CXXFLAGS)
 
 if HAVE_GTEST
-lib_LTLIBRARIES = libtestutils.la
+noinst_LTLIBRARIES = libtestutils.la
 
 libtestutils_la_SOURCES = srv_test.h srv_test.cc
 libtestutils_la_SOURCES += dnsmessage_test.h dnsmessage_test.cc
diff --git a/src/lib/testutils/dnsmessage_test.h b/src/lib/testutils/dnsmessage_test.h
index a8b7284..1aba526 100644
--- a/src/lib/testutils/dnsmessage_test.h
+++ b/src/lib/testutils/dnsmessage_test.h
@@ -21,6 +21,7 @@
 #include <dns/message.h>
 #include <dns/name.h>
 #include <dns/masterload.h>
+#include <dns/rdataclass.h>
 #include <dns/rrclass.h>
 #include <dns/rrset.h>
 
@@ -113,13 +114,32 @@ void rrsetCheck(isc::dns::ConstRRsetPtr expected_rrset,
 /// The definitions in this name space are not supposed to be used publicly,
 /// but are given here because they are used in templated functions.
 namespace detail {
-// Helper matching class used in rrsetsCheck()
+// Helper matching class used in rrsetsCheck().  Basically we only have to
+// check the equality of name, RR type and RR class, but for RRSIGs we need
+// special additional checks because they are essentially different if their
+// 'type covered' are different.  For simplicity, we only compare the types
+// of the first RRSIG RDATAs (and only check when they exist); if there's
+// further difference in the RDATA, the main comparison checks will detect it.
 struct RRsetMatch : public std::unary_function<isc::dns::ConstRRsetPtr, bool> {
     RRsetMatch(isc::dns::ConstRRsetPtr target) : target_(target) {}
     bool operator()(isc::dns::ConstRRsetPtr rrset) const {
-        return (rrset->getType() == target_->getType() &&
-                rrset->getClass() == target_->getClass() &&
-                rrset->getName() == target_->getName());
+        if (rrset->getType() != target_->getType() ||
+            rrset->getClass() != target_->getClass() ||
+            rrset->getName() != target_->getName()) {
+            return (false);
+        }
+        if (rrset->getType() != isc::dns::RRType::RRSIG()) {
+            return (true);
+        }
+        if (rrset->getRdataCount() == 0 || target_->getRdataCount() == 0) {
+            return (true);
+        }
+        isc::dns::RdataIteratorPtr rdit = rrset->getRdataIterator();
+        isc::dns::RdataIteratorPtr targetit = target_->getRdataIterator();
+        return (dynamic_cast<const isc::dns::rdata::generic::RRSIG&>(
+                    rdit->getCurrent()).typeCovered() ==
+                dynamic_cast<const isc::dns::rdata::generic::RRSIG&>(
+                    targetit->getCurrent()).typeCovered());
     }
     const isc::dns::ConstRRsetPtr target_;
 };
diff --git a/src/lib/testutils/testdata/Makefile.am b/src/lib/testutils/testdata/Makefile.am
index 93b9eb9..918d5c5 100644
--- a/src/lib/testutils/testdata/Makefile.am
+++ b/src/lib/testutils/testdata/Makefile.am
@@ -32,4 +32,4 @@ EXTRA_DIST += test2.zone.in
 EXTRA_DIST += test2-new.zone.in
 
 .spec.wire:
-	$(abs_top_builddir)/src/lib/dns/tests/testdata/gen-wiredata.py -o $@ $<
+	$(PYTHON) $(top_builddir)/src/lib/util/python/gen_wiredata.py -o $@ $<
diff --git a/src/lib/util/Makefile.am b/src/lib/util/Makefile.am
index 3db9ac4..0b78b29 100644
--- a/src/lib/util/Makefile.am
+++ b/src/lib/util/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = . io unittests tests pyunittests
+SUBDIRS = . io unittests tests pyunittests python
 
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/util -I$(top_builddir)/src/lib/util
diff --git a/src/lib/util/buffer.h b/src/lib/util/buffer.h
index b7a8e28..eb90d64 100644
--- a/src/lib/util/buffer.h
+++ b/src/lib/util/buffer.h
@@ -207,6 +207,24 @@ public:
     }
     //@}
 
+    /// @brief Read specified number of bytes as a vector.
+    ///
+    /// If specified buffer is too short, it will be expanded
+    /// using vector::resize() method.
+    ///
+    /// @param data Reference to a buffer (data will be stored there).
+    /// @param len Number of bytes to read into the vector.
+    ///
+    void readVector(std::vector<uint8_t>& data, size_t len)
+    {
+        if (position_ + len > len_) {
+            isc_throw(InvalidBufferPosition, "read beyond end of buffer");
+        }
+
+        data.resize(len);
+        readData(&data[0], len);
+    }
+
 private:
     size_t position_;
 
@@ -519,6 +537,6 @@ typedef boost::shared_ptr<OutputBuffer> OutputBufferPtr;
 } // namespace isc
 #endif  // __BUFFER_H
 
-// Local Variables: 
+// Local Variables:
 // mode: c++
-// End: 
+// End:
diff --git a/src/lib/util/filename.h b/src/lib/util/filename.h
index c9874ce..f625938 100644
--- a/src/lib/util/filename.h
+++ b/src/lib/util/filename.h
@@ -103,6 +103,11 @@ public:
         return (extension_);
     }
 
+    /// \return Name + extension of Given File Name
+    std::string nameAndExtension() const {
+        return (name_ + extension_);
+    }
+
     /// \brief Expand Name with Default
     ///
     /// A default file specified is supplied and used to fill in any missing
diff --git a/src/lib/util/io_utilities.h b/src/lib/util/io_utilities.h
index ecab3ce..61d4c9c 100644
--- a/src/lib/util/io_utilities.h
+++ b/src/lib/util/io_utilities.h
@@ -48,13 +48,54 @@ readUint16(const void* buffer) {
 /// \param value 16-bit value to convert
 /// \param buffer Data buffer at least two bytes long into which the 16-bit
 ///        value is written in network-byte order.
-
-inline void
+///
+/// \return pointer to the next byte after stored value
+inline uint8_t*
 writeUint16(uint16_t value, void* buffer) {
     uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
 
     byte_buffer[0] = static_cast<uint8_t>((value & 0xff00U) >> 8);
     byte_buffer[1] = static_cast<uint8_t>(value & 0x00ffU);
+
+    return (byte_buffer + sizeof(uint16_t));
+}
+
+/// \brief Read Unsigned 32-Bit Integer from Buffer
+///
+/// \param buffer Data buffer at least four bytes long of which the first four
+///        bytes are assumed to represent a 32-bit integer in network-byte
+///        order.
+///
+/// \return Value of 32-bit unsigned integer
+inline uint32_t
+readUint32(const uint8_t* buffer) {
+    const uint8_t* byte_buffer = static_cast<const uint8_t*>(buffer);
+
+    uint32_t result = (static_cast<uint32_t>(byte_buffer[0])) << 24;
+    result |= (static_cast<uint32_t>(byte_buffer[1])) << 16;
+    result |= (static_cast<uint32_t>(byte_buffer[2])) << 8;
+    result |= (static_cast<uint32_t>(byte_buffer[3]));
+
+    return (result);
+}
+
+/// \brief Write Unsigned 32-Bit Integer to Buffer
+///
+/// \param value 32-bit value to convert
+/// \param buffer Data buffer at least four bytes long into which the 32-bit
+///        value is written in network-byte order.
+///
+/// \return pointer to the next byte after stored value
+inline uint8_t*
+writeUint32(uint32_t value, uint8_t* buffer) {
+    uint8_t* byte_buffer = static_cast<uint8_t*>(buffer);
+
+    byte_buffer[0] = static_cast<uint8_t>((value & 0xff000000U) >> 24);
+    byte_buffer[1] = static_cast<uint8_t>((value & 0x00ff0000U) >> 16);
+    byte_buffer[2] = static_cast<uint8_t>((value & 0x0000ff00U) >>  8);
+    byte_buffer[3] = static_cast<uint8_t>((value & 0x000000ffU));
+
+    return (byte_buffer + sizeof(uint32_t));
 }
 
 } // namespace util
diff --git a/src/lib/util/python/Makefile.am b/src/lib/util/python/Makefile.am
new file mode 100644
index 0000000..81d528c
--- /dev/null
+++ b/src/lib/util/python/Makefile.am
@@ -0,0 +1 @@
+noinst_SCRIPTS = gen_wiredata.py mkpywrapper.py
diff --git a/src/lib/util/python/gen_wiredata.py.in b/src/lib/util/python/gen_wiredata.py.in
new file mode 100755
index 0000000..8bd2b3c
--- /dev/null
+++ b/src/lib/util/python/gen_wiredata.py.in
@@ -0,0 +1,1232 @@
+#!@PYTHON@
+
+# Copyright (C) 2010  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Generator of various types of DNS data in the hex format.
+
+This script reads a human readable specification file (called "spec
+file" hereafter) that defines some type of DNS data (an RDATA, an RR,
+or a complete message) and dumps the defined data to a separate file
+as a "wire format" sequence parsable by the
+UnitTestUtil::readWireData() function (currently defined as part of
+libdns++ tests).  Many DNS related tests involve wire format test
+data, so it will be convenient if we can define the data in a more
+intuitive way than writing the entire hex sequence by hand.
+
+Here is a simple example.  Consider the following spec file:
+
+  [custom]
+  sections: a
+  [a]
+  as_rr: True
+
+When the script reads this file, it detects the file specifies a single
+component (called "section" here) that consists of a single A RDATA,
+which must be dumped as an RR (not only the part of RDATA).  It then
+dumps the following content:
+
+  # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=4)
+  076578616d706c6503636f6d00 0001 0001 00015180 0004
+  # Address=192.0.2.1
+  c0000201
+
+As can be seen, the script automatically completes all variable
+parameters of RRs: owner name, class, TTL, RDATA length and data.  For
+testing purposes many of these will be the same common one (like
+"example.com" or 192.0.2.1), so it would be convenient if we only have
+to specify non default parameters.  To change the RDATA (i.e., the
+IPv4 address), we should add the following line at the end of the spec
+file:
+
+  address: 192.0.2.2
+
+Then the last two lines of the output file will be as follows:
+
+  # Address=192.0.2.2
+  c0000202
+
+In some cases we would rather specify malformed data for tests.  This
+script has the ability to specify broken parameters for many types of
+data.  For example, we can generate data that would look like an A RR
+but the RDLEN is 3 by adding the following line to the spec file:
+
+  rdlen: 3
+
+Then the first two lines of the output file will be as follows:
+
+  # A RR (QNAME=example.com Class=IN(1) TTL=86400 RDLEN=3)
+  076578616d706c6503636f6d00 0001 0001 00015180 0003
+
+** USAGE **
+
+  gen_wiredata.py [-o output_file] spec_file
+
+If the -o option is missing, and if the spec_file has a suffix (such as
+in the form of "data.spec"), the output file name will be the prefix
+part of it (as in "data"); if -o is missing and the spec_file does not
+have a suffix, the script will fail.
+
+** SPEC FILE SYNTAX **
+
+A spec file accepted in this script should be in the form of a
+configuration file that is parsable by the Python's standard
+configparser module.  In short, it consists of sections; each section
+is identified in the form of [section_name] followed by "name: value"
+entries.  Lines beginning with # or ; will be treated as comments.
+Refer to the configparser module documentation for further details of
+the general syntax.
+
+This script has two major modes: the custom mode and the DNS query
+mode.  The former generates an arbitrary combination of DNS message
+header, question section, RDATAs or RRs.  It is mainly intended to
+generate a test data for a single type of RDATA or RR, or for
+complicated complete DNS messages.  The DNS query mode is actually a
+special case of the custom mode, which is a shortcut to generate a
+simple DNS query message (with or without EDNS).
+
+* Custom mode syntax *
+
+By default this script assumes the DNS query mode.  To specify the
+custom mode, there must be a special "custom" section in the spec
+file, which should contain 'sections' entry.  This value of this
+entry is colon-separated string fields, each of which is either
+"header", "question", "edns", "name", or a string specifying an RR
+type.  For RR types the string is lower-cased string mnemonic that
+identifies the type: 'a' for type A, 'ns' for type NS, and so on
+(note: in the current implementation it's case sensitive, and must be
+lower cased).
+
+Each of these fields is interpreted as a section name of the spec
+(configuration), and in that section parameters specific to the
+semantics of the field can be configured.
+
+A "header" section specifies the content of a DNS message header.
+See the documentation of the DNSHeader class of this module for
+configurable parameters.
+
+A "question" section specifies the content of a single question that
+is normally to be placed in the Question section of a DNS message.
+See the documentation of the DNSQuestion class of this module for
+configurable parameters.
+
+An "edns" section specifies the content of an EDNS OPT RR.  See the
+documentation of the EDNS class of this module for configurable
+parameters.
+
+A "name" section specifies a domain name with or without compression.
+This is specifically intended to be used for testing name related
+functionalities and would rarely be used with other sections.  See the
+documentation of the Name class of this module for configurable
+parameters.
+
+In a specific section for an RR or RDATA, possible entries depend on
+the type.  But there are some common configurable entries.  See the
+description of the RR class.  The most important one would be "as_rr".
+It controls whether the entry should be treated as an RR (with name,
+type, class and TTL) or only as an RDATA.  By default as_rr is
+"False", so if an entry is to be interpreted as an RR, an as_rr entry
+must be explicitly specified with a value of "True".
+
+Another common entry is "rdlen".  It specifies the RDLEN field value
+of the RR (note: this is included when the entry is interpreted as
+RDATA, too).  By default this value is automatically determined by the
+RR type and (it has a variable length) from other fields of RDATA, but
+as shown in the above example, it can be explicitly set, possibly to a
+bogus value for testing against invalid data.
+
+For type specific entries (and their defaults when provided), see the
+documentation of the corresponding Python class defined in this
+module.  In general, there should be a class named the same mnemonic
+of the corresponding RR type for each supported type, and they are a
+subclass of the RR class.  For example, the "NS" class is defined for
+RR type NS.
+
+Look again at the A RR example shown at the beginning of this
+description.  There's a "custom" section, which consists of a
+"sections" entry whose value is a single "a", which means the data to
+be generated is an A RR or RDATA.  There's a corresponding "a"
+section, which only specifies that it should be interpreted as an RR
+(all field values of the RR are derived from the default).
+
+If you want to generate a data sequence for two or more RRs or
+RDATAs, you can specify them in the form of colon-separated fields for
+the "sections" entry.  For example, to generate a sequence of A and NS
+RRs in that order, the "custom" section would be something like this:
+
+  [custom]
+  sections: a:ns
+
+and there must be an "ns" section in addition to "a".
+
+If a sequence of two or more RRs/RDATAs of the same RR type should be
+generated, these should be uniquely indexed with the "/" separator.
+For example, to generate two A RRs, the "custom" section would be as
+follows:
+
+  [custom]
+  sections: a/1:a/2
+
+and there must be "a/1" and "a/2" sections.
+
+Another practical example that would be used for many tests is to
+generate data for a complete DNS response message.  The spec file of
+such an example configuration would look like as follows:
+
+  [custom]
+  sections: header:question:a
+  [header]
+  qr: 1
+  ancount: 1
+  [question]
+  [a]
+  as_rr: True
+
+With this configuration, this script will generate test data for a DNS
+response to a query for example.com/IN/A containing one corresponding
+A RR in the answer section.
+
+* DNS query mode syntax *
+
+If the spec file does not contain a "custom" section (that has a
+"sections" entry), this script assumes the DNS query mode.  This mode
+is actually a special case of custom mode; it implicitly assumes the
+"sections" entry whose value is "header:question:edns".
+
+In this mode it is expected that the spec file also contains at least
+a "header" and "question" sections, and optionally an "edns" section.
+But the script does not warn or fail even if the expected sections are
+missing.
+
+* Entry value types *
+
+As described above, a section of the spec file accepts entries
+specific to the semantics of the section.  They generally correspond
+to DNS message or RR fields.
+
+Many of them are expected to be integral values, for which either decimal or
+hexadecimal representation is accepted, for example:
+
+  rr_ttl: 3600
+  tag: 0x1234
+
+Some others are expected to be string.  A string value does not have
+to be quoted:
+
+  address: 192.0.2.2
+
+but can also be quoted with single quotes:
+
+  address: '192.0.2.2'
+
+Note 1: a string that can be interpreted as an integer must be quoted.
+For example, if you want to set a "string" entry to "3600", it should
+be:
+
+  string: '3600'
+
+instead of
+
+  string: 3600
+
+Note 2: a string enclosed with double quotes is not accepted:
+
+  # This doesn't work:
+  address: "192.0.2.2"
+
+In general, string values are converted to hexadecimal sequences
+according to the semantics of the entry.  For instance, a textual IPv4
+address in the above example will be converted to a hexadecimal
+sequence corresponding to a 4-byte integer.  So, in many cases, the
+acceptable syntax for a particular string entry value should be
+obvious from the context.  There are still some exceptional cases
+especially for complicated RR field values, for which the
+corresponding class documentation should be referenced.
+
+One special string syntax that would be worth noting is domain names,
+which would naturally be used in many kinds of entries.  The simplest
+form of acceptable syntax is a textual representation of domain names
+such as "example.com" (note: names are always assumed to be
+"absolute", so the trailing dot can be omitted).  But a domain name in
+the wire format can also contain a compression pointer.  This script
+provides a simple support for name compression with a special notation
+of "ptr=nn" where nn is the numeric pointer value (decimal).  For example,
+if the NSDNAME field of an NS RDATA is specified as follows:
+
+  nsname: ns.ptr=12
+
+this script will generate the following output:
+
+  # NS name=ns.ptr=12
+  026e73c00c
+
+** EXTEND THE SCRIPT **
+
+This script is expected to be extended as we add more support for
+various types of RR.  It is encouraged to add support for a new type
+of RR to this script as we see the need for testing that type.  Here
+is a simple instruction of how to do that.
+
+Assume you are adding support for "FOO" RR.  Also assume that the FOO
+RDATA contains a single field named "value".
+
+What you are expected to do is as follows:
+
+- Define a new class named "FOO" inherited from the RR class.  Also
+  define a class variable named "value" for the FOO RDATA field (the
+  variable name can be different from the field name, but it's
+  convenient if it can be easily identifiable.) with an appropriate
+  default value (if possible):
+
+    class FOO(RR):
+        value = 10
+
+  The name of the variable will be (automatically) used as the
+  corresponding entry name in the spec file.  So, a spec file that
+  sets this field to 20 would look like this:
+
+    [foo]
+    value: 20
+
+- Define the "dump()" method for class FOO.  It must call
+  self.dump_header() (which is derived from class RR) at the
+  beginning.  It then prints the RDATA field values in an appropriate
+  way.  Assuming the value is a 16-bit integer field, a complete
+  dump() method would look like this:
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = 2
+        self.dump_header(f, self.rdlen)
+        f.write('# Value=%d\\n' % (self.value))
+        f.write('%04x\\n' % (self.value))
+
+  The first f.write() call is not mandatory, but is encouraged to
+  be provided so that the generated files will be more human readable.
+  Depending on the complexity of the RDATA fields, the dump()
+  implementation would be more complicated.  In particular, if the
+  RDATA length is variable and the RDLEN field value is not specified
+  in the spec file, the dump() method is normally expected to
+  calculate the correct length and pass it to dump_header().  See the
+  implementation of various derived classes of class RR for actual
+  examples.
+"""
+
+import configparser, re, time, socket, sys
+from datetime import datetime
+from optparse import OptionParser
+
+re_hex = re.compile(r'^0x[0-9a-fA-F]+')
+re_decimal = re.compile(r'^\d+$')
+re_string = re.compile(r"\'(.*)\'$")
+
+dnssec_timefmt = '%Y%m%d%H%M%S'
+
+dict_qr = { 'query' : 0, 'response' : 1 }
+dict_opcode = { 'query' : 0, 'iquery' : 1, 'status' : 2, 'notify' : 4,
+                'update' : 5 }
+rdict_opcode = dict([(dict_opcode[k], k.upper()) for k in dict_opcode.keys()])
+dict_rcode = { 'noerror' : 0, 'formerr' : 1, 'servfail' : 2, 'nxdomain' : 3,
+               'notimp' : 4, 'refused' : 5, 'yxdomain' : 6, 'yxrrset' : 7,
+               'nxrrset' : 8, 'notauth' : 9, 'notzone' : 10 }
+rdict_rcode = dict([(dict_rcode[k], k.upper()) for k in dict_rcode.keys()])
+dict_rrtype = { 'none' : 0, 'a' : 1, 'ns' : 2, 'md' : 3, 'mf' : 4, 'cname' : 5,
+                'soa' : 6, 'mb' : 7, 'mg' : 8, 'mr' : 9, 'null' : 10,
+                'wks' : 11, 'ptr' : 12, 'hinfo' : 13, 'minfo' : 14, 'mx' : 15,
+                'txt' : 16, 'rp' : 17, 'afsdb' : 18, 'x25' : 19, 'isdn' : 20,
+                'rt' : 21, 'nsap' : 22, 'nsap_tr' : 23, 'sig' : 24, 'key' : 25,
+                'px' : 26, 'gpos' : 27, 'aaaa' : 28, 'loc' : 29, 'nxt' : 30,
+                'srv' : 33, 'naptr' : 35, 'kx' : 36, 'cert' : 37, 'a6' : 38,
+                'dname' : 39, 'opt' : 41, 'apl' : 42, 'ds' : 43, 'sshfp' : 44,
+                'ipseckey' : 45, 'rrsig' : 46, 'nsec' : 47, 'dnskey' : 48,
+                'dhcid' : 49, 'nsec3' : 50, 'nsec3param' : 51, 'hip' : 55,
+                'spf' : 99, 'unspec' : 103, 'tkey' : 249, 'tsig' : 250,
+                'dlv' : 32769, 'ixfr' : 251, 'axfr' : 252, 'mailb' : 253,
+                'maila' : 254, 'any' : 255 }
+rdict_rrtype = dict([(dict_rrtype[k], k.upper()) for k in dict_rrtype.keys()])
+dict_rrclass = { 'in' : 1, 'ch' : 3, 'hs' : 4, 'any' : 255 }
+rdict_rrclass = dict([(dict_rrclass[k], k.upper()) for k in \
+                          dict_rrclass.keys()])
+dict_algorithm = { 'rsamd5' : 1, 'dh' : 2, 'dsa' : 3, 'ecc' : 4,
+                   'rsasha1' : 5 }
+dict_nsec3_algorithm = { 'reserved' : 0, 'sha1' : 1 }
+rdict_algorithm = dict([(dict_algorithm[k], k.upper()) for k in \
+                            dict_algorithm.keys()])
+rdict_nsec3_algorithm = dict([(dict_nsec3_algorithm[k], k.upper()) for k in \
+                                  dict_nsec3_algorithm.keys()])
+
+header_xtables = { 'qr' : dict_qr, 'opcode' : dict_opcode,
+                   'rcode' : dict_rcode }
+question_xtables = { 'rrtype' : dict_rrtype, 'rrclass' : dict_rrclass }
+
+def parse_value(value, xtable = {}):
+    if re.search(re_hex, value):
+        return int(value, 16)
+    if re.search(re_decimal, value):
+        return int(value)
+    m = re.match(re_string, value)
+    if m:
+        return m.group(1)
+    lovalue = value.lower()
+    if lovalue in xtable:
+        return xtable[lovalue]
+    return value
+
+def code_totext(code, dict):
+    if code in dict.keys():
+        return dict[code] + '(' + str(code) + ')'
+    return str(code)
+
+def encode_name(name, absolute=True):
+    # make sure the name is dot-terminated.  duplicate dots will be ignored
+    # below.
+    name += '.'
+    labels = name.split('.')
+    wire = ''
+    for l in labels:
+        if len(l) > 4 and l[0:4] == 'ptr=':
+            # special meta-syntax for compression pointer
+            wire += '%04x' % (0xc000 | int(l[4:]))
+            break
+        if absolute or len(l) > 0:
+            wire += '%02x' % len(l)
+            wire += ''.join(['%02x' % ord(ch) for ch in l])
+        if len(l) == 0:
+            break
+    return wire
+
+def encode_string(name, len=None):
+    if type(name) is int and len is not None:
+        return '%0.*x' % (len * 2, name)
+    return ''.join(['%02x' % ord(ch) for ch in name])
+
+def count_namelabels(name):
+    if name == '.':             # special case
+        return 0
+    m = re.match('^(.*)\.$', name)
+    if m:
+        name = m.group(1)
+    return len(name.split('.'))
+
+def get_config(config, section, configobj, xtables = {}):
+    '''Fill configobj's attributes from one section of the spec file.
+
+    Each option of the section is parsed with parse_value(), using the
+    per-field translation table from xtables when one is defined for
+    that field.  Returns False when the section doesn't exist in the
+    spec file, True otherwise.
+    '''
+    try:
+        for field in config.options(section):
+            value = config.get(section, field)
+            if field in xtables.keys():
+                xtable = xtables[field]
+            else:
+                xtable = {}
+            configobj.__dict__[field] = parse_value(value, xtable)
+    except configparser.NoSectionError:
+        return False
+    return True
+
+def print_header(f, input_file):
+    '''Write the auto-generation banner comment to the output file f.'''
+    f.write('''###
+### This data file was auto-generated from ''' + input_file + '''
+###
+''')
+
+class Name:
+    '''Implements rendering a single domain name in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - name (string): A textual representation of the name, such as
+      'example.com'.
+    - pointer (int): If specified, compression pointer will be
+      prepended to the generated data with the offset being the value
+      of this parameter.
+    '''
+
+    name = 'example.com'
+    pointer = None                # no compression by default
+    def dump(self, f):
+        name = self.name
+        if self.pointer is not None:
+            # append the 'ptr=N' meta-label understood by encode_name()
+            if len(name) > 0 and name[-1] != '.':
+                name += '.'
+            name += 'ptr=%d' % self.pointer
+        name_wire = encode_name(name)
+        f.write('\n# DNS Name: %s' % self.name)
+        if self.pointer is not None:
+            f.write(' + compression pointer: %d' % self.pointer)
+        f.write('\n')
+        f.write('%s' % name_wire)
+        f.write('\n')
+
+class DNSHeader:
+    '''Implements rendering a DNS Header section in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - id (16-bit int):
+    - qr, aa, tc, rd, ra, ad, cd (0 or 1): Standard header bits as
+      defined in RFC1035 and RFC4035.  If set to 1, the corresponding
+      bit will be set; if set to 0, it will be cleared.
+    - mbz (0-3): The reserved field of the 3rd and 4th octets of the
+      header.
+    - rcode (4-bit int or string): The RCODE field.  If specified as a
+      string, it must be the commonly used textual mnemonic of the RCODEs
+      (NOERROR, FORMERR, etc, case insensitive).
+    - opcode (4-bit int or string): The OPCODE field.  If specified as
+      a string, it must be the commonly used textual mnemonic of the
+      OPCODEs (QUERY, NOTIFY, etc, case insensitive).
+    - qdcount, ancount, nscount, arcount (16-bit int): The QD/AN/NS/AR
+      COUNT fields, respectively.
+    '''
+
+    id = 0x1035
+    (qr, aa, tc, rd, ra, ad, cd) = 0, 0, 0, 0, 0, 0, 0
+    mbz = 0
+    rcode = 0                   # noerror
+    opcode = 0                  # query
+    (qdcount, ancount, nscount, arcount) = 1, 0, 0, 0
+
+    def dump(self, f):
+        f.write('\n# Header Section\n')
+        f.write('# ID=' + str(self.id))
+        f.write(' QR=' + ('Response' if self.qr else 'Query'))
+        f.write(' Opcode=' + code_totext(self.opcode, rdict_opcode))
+        f.write(' Rcode=' + code_totext(self.rcode, rdict_rcode))
+        f.write('%s' % (' AA' if self.aa else ''))
+        f.write('%s' % (' TC' if self.tc else ''))
+        f.write('%s' % (' RD' if self.rd else ''))
+        f.write('%s' % (' AD' if self.ad else ''))
+        f.write('%s' % (' CD' if self.cd else ''))
+        f.write('\n')
+        f.write('%04x ' % self.id)
+        flag_and_code = 0
+        flag_and_code |= (self.qr << 15 | self.opcode << 14 | self.aa << 10 |
+                          self.tc << 9 | self.rd << 8 | self.ra << 7 |
+                          self.mbz << 6 | self.ad << 5 | self.cd << 4 |
+                          self.rcode)
+        f.write('%04x\n' % flag_and_code)
+        f.write('# QDCNT=%d, ANCNT=%d, NSCNT=%d, ARCNT=%d\n' %
+                (self.qdcount, self.ancount, self.nscount, self.arcount))
+        f.write('%04x %04x %04x %04x\n' % (self.qdcount, self.ancount,
+                                           self.nscount, self.arcount))
+
+class DNSQuestion:
+    '''Implements rendering a DNS question in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - name (string): The QNAME.  The string must be interpreted as a
+      valid domain name.
+    - rrtype (int or string): The question type.  If specified
+      as an integer, it must be the 16-bit RR type value of the
+      covered type.  If specified as a string, it must be the textual
+      mnemonic of the type.
+    - rrclass (int or string): The question class.  If specified as an
+      integer, it must be the 16-bit RR class value of the covered
+      type.  If specified as a string, it must be the textual mnemonic
+      of the class.
+    '''
+    name = 'example.com.'
+    rrtype = parse_value('A', dict_rrtype)
+    rrclass = parse_value('IN', dict_rrclass)
+
+    def dump(self, f):
+        f.write('\n# Question Section\n')
+        f.write('# QNAME=%s QTYPE=%s QCLASS=%s\n' %
+                (self.name,
+                 code_totext(self.rrtype, rdict_rrtype),
+                 code_totext(self.rrclass, rdict_rrclass)))
+        f.write(encode_name(self.name))
+        f.write(' %04x %04x\n' % (self.rrtype, self.rrclass))
+
+class EDNS:
+    '''Implements rendering EDNS OPT RR in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - name (string): The owner name of the OPT RR.  The string must be
+      interpreted as a valid domain name.
+    - udpsize (16-bit int): The UDP payload size (set as the RR class)
+    - extrcode (8-bit int): The upper 8 bits of the extended RCODE.
+    - version (8-bit int): The EDNS version.
+    - do (int): The DNSSEC DO bit.  The bit will be set if this value
+      is 1; otherwise the bit will be unset.
+    - mbz (15-bit int): The rest of the flags field.
+    - rdlen (16-bit int): The RDLEN field.  Note: right now specifying
+      a non 0 value (except for making bogus data) doesn't make sense
+      because there is no way to configure RDATA.
+    '''
+    name = '.'
+    udpsize = 4096
+    extrcode = 0
+    version = 0
+    do = 0
+    mbz = 0
+    rdlen = 0
+    def dump(self, f):
+        f.write('\n# EDNS OPT RR\n')
+        f.write('# NAME=%s TYPE=%s UDPSize=%d ExtRcode=%s Version=%s DO=%d\n' %
+                (self.name, code_totext(dict_rrtype['opt'], rdict_rrtype),
+                 self.udpsize, self.extrcode, self.version,
+                 1 if self.do else 0))
+        
+        # ext-rcode (high byte) and version (low byte) share one 16-bit
+        # word; DO occupies the top bit of the flags word and mbz is
+        # masked down to the remaining 15 bits.
+        code_vers = (self.extrcode << 8) | (self.version & 0x00ff)
+        extflags = (self.do << 15) | (self.mbz & ~0x8000)
+        f.write('%s %04x %04x %04x %04x\n' %
+                (encode_name(self.name), dict_rrtype['opt'], self.udpsize,
+                 code_vers, extflags))
+        f.write('# RDLEN=%d\n' % self.rdlen)
+        f.write('%04x\n' % self.rdlen)
+
+class RR:
+    '''This is a base class for various types of RR test data.
+    For each RR type (A, AAAA, NS, etc), we define a derived class of RR
+    to dump type specific RDATA parameters.  This class defines parameters
+    common to all types of RDATA, namely the owner name, RR class and TTL.
+    The dump() method of derived classes are expected to call dump_header(),
+    whose default implementation is provided in this class.  This method
+    decides whether to dump the test data as an RR (with name, type, class)
+    or only as RDATA (with its length), and dumps the corresponding data
+    via the specified file object.
+
+    By convention we assume derived classes are named after the common
+    standard mnemonic of the corresponding RR types.  For example, the
+    derived class for the RR type SOA should be named "SOA".
+
+    Configurable parameters are as follows:
+    - as_rr (bool): Whether or not the data is to be dumped as an RR.
+      False by default.
+    - rr_name (string): The owner name of the RR.  The string must be
+      interpreted as a valid domain name (compression pointer can be
+      contained).  Default is 'example.com.'
+    - rr_class (string): The RR class of the data.  Only meaningful
+      when the data is dumped as an RR.  Default is 'IN'.
+    - rr_ttl (int): The TTL value of the RR.  Only meaningful when
+      the data is dumped as an RR.  Default is 86400 (1 day).
+    - rdlen (int): 16-bit RDATA length.  It can be None (i.e. omitted
+      in the spec file), in which case the actual length of the
+      generated RDATA is automatically determined and used; if
+      negative, the RDLEN field will be omitted from the output data.
+      (Note that omitting RDLEN with as_rr being True is mostly
+      meaningless, although the script doesn't complain about it).
+      Default is None.
+    '''
+
+    def __init__(self):
+        self.as_rr = False
+        # only when as_rr is True, same for class/TTL:
+        self.rr_name = 'example.com'
+        self.rr_class = 'IN'
+        self.rr_ttl = 86400
+        self.rdlen = None
+
+    def dump_header(self, f, rdlen):
+        # The RR type is derived from the name of the concrete class
+        # (see the class docstring), translated via dict_rrtype.
+        type_txt = self.__class__.__name__
+        type_code = parse_value(type_txt, dict_rrtype)
+        rdlen_spec = ''
+        rdlen_data = ''
+        # a negative rdlen means "omit the RDLEN field entirely"
+        if rdlen >= 0:
+            rdlen_spec = ', RDLEN=%d' % rdlen
+            rdlen_data = '%04x' % rdlen
+        if self.as_rr:
+            rrclass = parse_value(self.rr_class, dict_rrclass)
+            f.write('\n# %s RR (QNAME=%s Class=%s TTL=%d%s)\n' %
+                    (type_txt, self.rr_name,
+                     code_totext(rrclass, rdict_rrclass), self.rr_ttl,
+                     rdlen_spec))
+            f.write('%s %04x %04x %08x %s\n' %
+                    (encode_name(self.rr_name), type_code, rrclass,
+                     self.rr_ttl, rdlen_data))
+        else:
+            f.write('\n# %s RDATA%s\n' % (type_txt, rdlen_spec))
+            f.write('%s\n' % rdlen_data)
+
+class A(RR):
+    '''Implements rendering A RDATA (of class IN) in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - address (string): The address field.  This must be a valid textual
+      IPv4 address.
+    '''
+    RDLEN_DEFAULT = 4           # fixed by default
+    address = '192.0.2.1'
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = self.RDLEN_DEFAULT
+        self.dump_header(f, self.rdlen)
+        f.write('# Address=%s\n' % (self.address))
+        # inet_aton() returns the 4-byte packed address; indexing a
+        # bytes object yields ints (Python 3), suitable for %02x.
+        bin_address = socket.inet_aton(self.address)
+        f.write('%02x%02x%02x%02x\n' % (bin_address[0], bin_address[1],
+                                        bin_address[2], bin_address[3]))
+
+class AAAA(RR):
+    '''Implements rendering AAAA RDATA (of class IN) in the test data
+    format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - address (string): The address field.  This must be a valid textual
+      IPv6 address.
+    '''
+    RDLEN_DEFAULT = 16          # fixed by default
+    address = '2001:db8::1'
+
+    def dump(self, f):
+        if self.rdlen is None:
+            self.rdlen = self.RDLEN_DEFAULT
+        self.dump_header(f, self.rdlen)
+        f.write('# Address=%s\n' % (self.address))
+        bin_address = socket.inet_pton(socket.AF_INET6, self.address)
+        [f.write('%02x' % x) for x in bin_address]
+        f.write('\n')
+
+class NS(RR):
+    '''Implements rendering NS RDATA in the test data format.
+
+    Configurable parameter is as follows (see the description of the
+    same name of attribute for the default value):
+    - nsname (string): The NSDNAME field.  The string must be
+      interpreted as a valid domain name.
+    '''
+
+    nsname = 'ns.example.com'
+
+    def dump(self, f):
+        nsname_wire = encode_name(self.nsname)
+        if self.rdlen is None:
+            self.rdlen = len(nsname_wire) / 2
+        self.dump_header(f, self.rdlen)
+        f.write('# NS name=%s\n' % (self.nsname))
+        f.write('%s\n' % nsname_wire)
+
+class SOA(RR):
+    '''Implements rendering SOA RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - mname/rname (string): The MNAME/RNAME fields, respectively.  The
+      string must be interpreted as a valid domain name.
+    - serial (32-bit int): The SERIAL field
+    - refresh (32-bit int): The REFRESH field
+    - retry (32-bit int): The RETRY field
+    - expire (32-bit int): The EXPIRE field
+    - minimum (32-bit int): The MINIMUM field
+    '''
+
+    mname = 'ns.example.com'
+    rname = 'root.example.com'
+    serial = 2010012601
+    refresh = 3600
+    retry = 300
+    expire = 3600000
+    minimum = 1200
+    def dump(self, f):
+        mname_wire = encode_name(self.mname)
+        rname_wire = encode_name(self.rname)
+        if self.rdlen is None:
+            # 20 = the five fixed 32-bit fields; the hex strings are two
+            # characters per byte.  NOTE(review): str() around rname_wire
+            # is redundant (it is already a string).
+            self.rdlen = int(20 + len(mname_wire) / 2 + len(str(rname_wire)) / 2)
+        self.dump_header(f, self.rdlen)
+        # NOTE(review): "NNAME" in the generated comment looks like a
+        # typo for "MNAME", but it is runtime output so it is left as-is.
+        f.write('# NNAME=%s RNAME=%s\n' % (self.mname, self.rname))
+        f.write('%s %s\n' % (mname_wire, rname_wire))
+        f.write('# SERIAL(%d) REFRESH(%d) RETRY(%d) EXPIRE(%d) MINIMUM(%d)\n' %
+                (self.serial, self.refresh, self.retry, self.expire,
+                 self.minimum))
+        f.write('%08x %08x %08x %08x %08x\n' % (self.serial, self.refresh,
+                                                self.retry, self.expire,
+                                                self.minimum))
+
+class TXT(RR):
+    '''Implements rendering TXT RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - nstring (int): number of character-strings
+    - stringlenN (int, N = 0, ..., nstring-1): the length of the
+      N-th character-string.
+    - stringN (string, N = 0, ..., nstring-1): the N-th
+      character-string.
+    - stringlen (int): the default string length.  If nstring >= 1 and the
+      corresponding stringlenN isn't specified in the spec file, this
+      value will be used.  If this parameter isn't specified either,
+      the length of the string will be used.  Note that it means
+      this parameter (or any stringlenN) doesn't have to be specified
+      unless you want to intentionally build a broken character string.
+    - string (string): the default string.  If nstring >= 1 and the
+      corresponding stringN isn't specified in the spec file, this
+      string will be used.
+    '''
+
+    nstring = 1
+    stringlen = None
+    string = 'Test String'
+
+    def dump(self, f):
+        stringlen_list = []
+        string_list = []
+        wirestring_list = []
+        # collect the N-th string and its (possibly faked) length,
+        # falling back to the "string"/"stringlen" defaults
+        for i in range(0, self.nstring):
+            key_string = 'string' + str(i)
+            if key_string in self.__dict__:
+                string_list.append(self.__dict__[key_string])
+            else:
+                string_list.append(self.string)
+            wirestring_list.append(encode_string(string_list[-1]))
+            key_stringlen = 'stringlen' + str(i)
+            if key_stringlen in self.__dict__:
+                stringlen_list.append(self.__dict__[key_stringlen])
+            else:
+                stringlen_list.append(self.stringlen)
+            if stringlen_list[-1] is None:
+                # auto-calculate from the actual wire data (2 hex chars/byte)
+                stringlen_list[-1] = int(len(wirestring_list[-1]) / 2)
+        if self.rdlen is None:
+            # one length octet per string plus the string data itself
+            self.rdlen = int(len(''.join(wirestring_list)) / 2) + self.nstring
+        self.dump_header(f, self.rdlen)
+        for i in range(0, self.nstring):
+            f.write('# String Len=%d, String=\"%s\"\n' %
+                    (stringlen_list[i], string_list[i]))
+            f.write('%02x%s%s\n' % (stringlen_list[i],
+                                    ' ' if len(wirestring_list[i]) > 0 else '',
+                                    wirestring_list[i]))
+
+class RP(RR):
+    '''Implements rendering RP RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - mailbox (string): The mailbox field.
+    - text (string): The text field.
+    These strings must be interpreted as a valid domain name.
+    '''
+    mailbox = 'root.example.com'
+    text = 'rp-text.example.com'
+    def dump(self, f):
+        mailbox_wire = encode_name(self.mailbox)
+        text_wire = encode_name(self.text)
+        if self.rdlen is None:
+            self.rdlen = (len(mailbox_wire) + len(text_wire)) / 2
+        else:
+            self.rdlen = int(self.rdlen)
+        self.dump_header(f, self.rdlen)
+        f.write('# MAILBOX=%s TEXT=%s\n' % (self.mailbox, self.text))
+        f.write('%s %s\n' % (mailbox_wire, text_wire))
+
+class MINFO(RR):
+    '''Implements rendering MINFO RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - rmailbox (string): The rmailbox field.
+    - emailbox (string): The emailbox field.
+    These strings must be interpreted as a valid domain name.
+    '''
+    rmailbox = 'rmailbox.example.com'
+    emailbox = 'emailbox.example.com'
+    def dump(self, f):
+        rmailbox_wire = encode_name(self.rmailbox)
+        emailbox_wire = encode_name(self.emailbox)
+        if self.rdlen is None:
+            self.rdlen = (len(rmailbox_wire) + len(emailbox_wire)) / 2
+        else:
+            self.rdlen = int(self.rdlen)
+        self.dump_header(f, self.rdlen)
+        f.write('# RMAILBOX=%s EMAILBOX=%s\n' % (self.rmailbox, self.emailbox))
+        f.write('%s %s\n' % (rmailbox_wire, emailbox_wire))
+
+class AFSDB(RR):
+    '''Implements rendering AFSDB RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - subtype (16 bit int): The subtype field.
+    - server (string): The server field.
+    The string must be interpreted as a valid domain name.
+    '''
+    subtype = 1
+    server = 'afsdb.example.com'
+    def dump(self, f):
+        server_wire = encode_name(self.server)
+        if self.rdlen is None:
+            self.rdlen = 2 + len(server_wire) / 2
+        else:
+            self.rdlen = int(self.rdlen)
+        self.dump_header(f, self.rdlen)
+        f.write('# SUBTYPE=%d SERVER=%s\n' % (self.subtype, self.server))
+        f.write('%04x %s\n' % (self.subtype, server_wire))
+
+class NSECBASE(RR):
+    '''Implements rendering NSEC/NSEC3 type bitmaps commonly used for
+    these RRs.  The NSEC and NSEC3 classes will be inherited from this
+    class.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - nbitmap (int): The number of type bitmaps.
+    The following three define the bitmaps.  If suffixed with "N"
+    (0 <= N < nbitmaps), it means the definition for the N-th bitmap.
+    If there is no suffix (e.g., just "block"), it means the default
+    for any unspecified values.
+    - block[N] (8-bit int): The Window Block.
+    - maplen[N] (8-bit int): The Bitmap Length.  The default "maplen"
+      can also be unspecified (with being set to None), in which case
+      the corresponding length will be calculated from the bitmap.
+    - bitmap[N] (string): The Bitmap.  This must be the hexadecimal
+      representation of the bitmap field.  For example, for a bitmap
+      where the 7th and 15th bits (and only these bits) are set, it
+      must be '0101'.  Note also that the value must be quoted with
+      single quotation marks because it could also be interpreted as an
+      integer.
+    '''
+    nbitmap = 1                 # number of bitmaps
+    maplen = None              # default bitmap length, auto-calculate
+    bitmap = '040000000003'     # an arbitrarily chosen bitmap sample
+    def dump(self, f):
+        # first, construct the bitmap data
+        block_list = []
+        maplen_list = []
+        bitmap_list = []
+        # resolve the per-bitmap values, falling back to the defaults
+        for i in range(0, self.nbitmap):
+            key_bitmap = 'bitmap' + str(i)
+            if key_bitmap in self.__dict__:
+                bitmap_list.append(self.__dict__[key_bitmap])
+            else:
+                bitmap_list.append(self.bitmap)
+            key_maplen = 'maplen' + str(i)
+            if key_maplen in self.__dict__:
+                maplen_list.append(self.__dict__[key_maplen])
+            else:
+                maplen_list.append(self.maplen)
+            if maplen_list[-1] is None: # calculate it if not specified
+                maplen_list[-1] = int(len(bitmap_list[-1]) / 2)
+            key_block = 'block' + str(i)
+            if key_block in self.__dict__:
+               block_list.append(self.__dict__[key_block])
+            else:
+                block_list.append(self.block)
+
+        # dump RR-type specific part (NSEC or NSEC3); the subclass's
+        # dump_fixedpart() receives the total bitmap length in bytes
+        # (1-byte block + 1-byte maplen per bitmap, plus the bitmap data)
+        self.dump_fixedpart(f, 2 * self.nbitmap + \
+                                int(len(''.join(bitmap_list)) / 2))
+
+        # dump the bitmap
+        for i in range(0, self.nbitmap):
+            f.write('# Bitmap: Block=%d, Length=%d\n' %
+                    (block_list[i], maplen_list[i]))
+            f.write('%02x %02x %s\n' %
+                    (block_list[i], maplen_list[i], bitmap_list[i]))
+
+class NSEC(NSECBASE):
+    '''Implements rendering NSEC RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - Type bitmap related parameters: see class NSECBASE
+    - nextname (string): The Next Domain Name field.  The string must be
+      interpreted as a valid domain name.
+    '''
+
+    nextname = 'next.example.com'
+    def dump_fixedpart(self, f, bitmap_totallen):
+        # called back from NSECBASE.dump() with the total bitmap length
+        name_wire = encode_name(self.nextname)
+        if self.rdlen is None:
+            # if rdlen needs to be calculated, it must be based on the bitmap
+            # length, because the configured maplen can be fake.
+            self.rdlen = int(len(name_wire) / 2) + bitmap_totallen
+        self.dump_header(f, self.rdlen)
+        f.write('# Next Name=%s (%d bytes)\n' % (self.nextname,
+                                                 int(len(name_wire) / 2)))
+        f.write('%s\n' % name_wire)
+
+class NSEC3(NSECBASE):
+    '''Implements rendering NSEC3 RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - Type bitmap related parameters: see class NSECBASE
+    - hashalg (8-bit int): The Hash Algorithm field.  Note that
+      currently the only defined algorithm is SHA-1, for which a value
+      of 1 will be used, and it's the default.  So this implementation
+      does not support any string representation right now.
+    - optout (bool): The Opt-Out flag of the Flags field.
+    - mbz (7-bit int): The rest of the Flags field.  This value will
+      be left shifted for 1 bit and then OR-ed with optout to
+      construct the complete Flags field.
+    - iterations (16-bit int): The Iterations field.
+    - saltlen (int): The Salt Length field.
+    - salt (string): The Salt field.  It is converted to a sequence of
+      ascii codes and its hexadecimal representation will be used.
+    - hashlen (int): The Hash Length field.
+    - hash (string): The Next Hashed Owner Name field.  This parameter
+      is interpreted as "salt".
+    '''
+
+    hashalg = 1                 # SHA-1
+    optout = False              # opt-out flag
+    mbz = 0                     # other flag fields (none defined yet)
+    iterations = 1
+    saltlen = 5
+    salt = 's' * saltlen
+    hashlen = 20
+    hash = 'h' * hashlen        # NOTE: class attribute shadows builtin hash()
+    def dump_fixedpart(self, f, bitmap_totallen):
+        # called back from NSECBASE.dump() with the total bitmap length
+        if self.rdlen is None:
+            # if rdlen needs to be calculated, it must be based on the bitmap
+            # length, because the configured maplen can be fake.
+            # 4 = hashalg(1) + flags(1) + iterations(2); the two 1-byte
+            # length fields precede the salt and the hash.  Note that
+            # the actual string lengths are used here even when
+            # saltlen/hashlen are overridden (which allows generating
+            # deliberately bogus data).
+            self.rdlen = 4 + 1 + len(self.salt) + 1 + len(self.hash) \
+                + bitmap_totallen
+        self.dump_header(f, self.rdlen)
+        optout_val = 1 if self.optout else 0
+        f.write('# Hash Alg=%s, Opt-Out=%d, Other Flags=%0x, Iterations=%d\n' %
+                (code_totext(self.hashalg, rdict_nsec3_algorithm),
+                 optout_val, self.mbz, self.iterations))
+        f.write('%02x %02x %04x\n' %
+                (self.hashalg, (self.mbz << 1) | optout_val, self.iterations))
+        f.write("# Salt Len=%d, Salt='%s'\n" % (self.saltlen, self.salt))
+        f.write('%02x%s%s\n' % (self.saltlen,
+                                ' ' if len(self.salt) > 0 else '',
+                                encode_string(self.salt)))
+        f.write("# Hash Len=%d, Hash='%s'\n" % (self.hashlen, self.hash))
+        f.write('%02x%s%s\n' % (self.hashlen,
+                                ' ' if len(self.hash) > 0 else '',
+                                encode_string(self.hash)))
+
+class RRSIG(RR):
+    '''Implements rendering RRSIG RDATA in the test data format.
+
+    Configurable parameters are as follows (see the description of the
+    same name of attribute for the default value):
+    - covered (int or string): The Type Covered field.  If specified
+      as an integer, it must be the 16-bit RR type value of the
+      covered type.  If specified as a string, it must be the textual
+      mnemonic of the type.
+    - algorithm (int or string): The Algorithm field.   If specified
+      as an integer, it must be the 8-bit algorithm number as defined
+      in RFC4034.  If specified as a string, it must be one of the keys
+      of dict_algorithm (case insensitive).
+    - labels (int): The Labels field.  If omitted (the corresponding
+      variable being set to None), the number of labels of "signer"
+      (excluding the trailing null label as specified in RFC4034) will
+      be used.
+    - originalttl (32-bit int): The Original TTL field.
+    - expiration (32-bit int): The Expiration TTL field.
+    - inception (32-bit int): The Inception TTL field.
+    - tag (16-bit int): The Key Tag field.
+    - signer (string): The Signer's Name field.  The string must be
+      interpreted as a valid domain name.
+    - signature (int): The Signature field.  Right now only a simple
+      integer form is supported.  A prefix of "0" will be prepended if
+      the resulting hexadecimal representation consists of an odd
+      number of characters.
+    '''
+
+    covered = 'A'
+    algorithm = 'RSASHA1'
+    labels = None                 # auto-calculate (#labels of signer)
+    originalttl = 3600
+    # defaults are POSIX timestamps derived from fixed YYYYMMDDhhmmss strings
+    expiration = int(time.mktime(datetime.strptime('20100131120000',
+                                                   dnssec_timefmt).timetuple()))
+    inception = int(time.mktime(datetime.strptime('20100101120000',
+                                                  dnssec_timefmt).timetuple()))
+    tag = 0x1035
+    signer = 'example.com'
+    signature = 0x123456789abcdef123456789abcdef
+
+    def dump(self, f):
+        name_wire = encode_name(self.signer)
+        sig_wire = '%x' % self.signature
+        if len(sig_wire) % 2 != 0:
+            # pad to a whole number of octets
+            sig_wire = '0' + sig_wire
+        if self.rdlen is None:
+            # 18 = the fixed-size fields (type covered through key tag)
+            self.rdlen = int(18 + len(name_wire) / 2 + len(str(sig_wire)) / 2)
+        self.dump_header(f, self.rdlen)
+
+        # translate textual mnemonics into their numeric codes
+        if type(self.covered) is str:
+            self.covered = dict_rrtype[self.covered.lower()]
+        if type(self.algorithm) is str:
+            self.algorithm = dict_algorithm[self.algorithm.lower()]
+        if self.labels is None:
+            self.labels = count_namelabels(self.signer)
+        f.write('# Covered=%s Algorithm=%s Labels=%d OrigTTL=%d\n' %
+                (code_totext(self.covered, rdict_rrtype),
+                 code_totext(self.algorithm, rdict_algorithm), self.labels,
+                 self.originalttl))
+        f.write('%04x %02x %02x %08x\n' % (self.covered, self.algorithm,
+                                           self.labels, self.originalttl))
+        f.write('# Expiration=%s, Inception=%s\n' %
+                (str(self.expiration), str(self.inception)))
+        f.write('%08x %08x\n' % (self.expiration, self.inception))
+        f.write('# Tag=%d Signer=%s and Signature\n' % (self.tag, self.signer))
+        f.write('%04x %s %s\n' % (self.tag, name_wire, sig_wire))
+
+class TSIG(RR):
+    '''Implements rendering TSIG RDATA in the test data format.
+
+    As a meta RR type TSIG uses some non common parameters.  This
+    class overrides some of the default attributes of the RR class
+    accordingly:
+    - rr_class is set to 'ANY'
+    - rr_ttl is set to 0
+    Like other derived classes these can be overridden via the spec
+    file.
+
+    Other configurable parameters are as follows (see the description
+    of the same name of attribute for the default value):
+    - algorithm (string): The Algorithm Name field.  The value is
+      generally interpreted as a domain name string, and will
+      typically be one of the standard algorithm names defined in
+      RFC4635.  For convenience, however, a shortcut value "hmac-md5"
+      is allowed instead of the standard "hmac-md5.sig-alg.reg.int".
+    - time_signed (48-bit int): The Time Signed field.
+    - fudge (16-bit int): The Fudge field.
+    - mac_size (int): The MAC Size field.  If omitted, the common value
+      determined by the algorithm will be used.
+    - mac (int or string): The MAC field.  If specified as an integer,
+      the integer value is used as the MAC, possibly with prepended
+      0's so that the total length will be mac_size.  If specified as a
+      string, it is converted to a sequence of ascii codes and its
+      hexadecimal representation will be used.  So, for example, if
+      "mac" is set to 'abc', it will be converted to '616263'.  Note
+      that in this case the length of "mac" may not be equal to
+      mac_size.  If unspecified, the mac_size number of '78' (ascii
+      code of 'x') will be used.
+    - original_id (16-bit int): The Original ID field.
+    - error (16-bit int): The Error field.
+    - other_len (int): The Other Len field.
+    - other_data (int or string): The Other Data field.  This is
+      interpreted just like "mac" except that other_len is used
+      instead of mac_size.  If unspecified this will be empty unless
+      the "error" is set to 18 (which means the "BADTIME" error), in
+      which case a hexadecimal representation of "time_signed + fudge
+      + 1" will be used.
+    '''
+
+    algorithm = 'hmac-sha256'
+    time_signed = 1286978795    # arbitrarily chosen default
+    fudge = 300
+    mac_size = None             # use a common value for the algorithm
+    mac = None                  # use 'x' * mac_size
+    original_id = 2845          # arbitrarily chosen default
+    error = 0
+    other_len = None         # 6 if error is BADTIME; otherwise 0
+    other_data = None        # use time_signed + fudge + 1 for BADTIME
+    # well-known MAC sizes for the supported algorithms
+    dict_macsize = { 'hmac-md5' : 16, 'hmac-sha1' : 20, 'hmac-sha256' : 32 }
+
+    # TSIG has some special defaults
+    def __init__(self):
+        super().__init__()
+        self.rr_class = 'ANY'
+        self.rr_ttl = 0
+
+    def dump(self, f):
+        # expand the "hmac-md5" shortcut to the full standard name
+        if str(self.algorithm) == 'hmac-md5':
+            name_wire = encode_name('hmac-md5.sig-alg.reg.int')
+        else:
+            name_wire = encode_name(self.algorithm)
+        mac_size = self.mac_size
+        if mac_size is None:
+            if self.algorithm in self.dict_macsize.keys():
+                mac_size = self.dict_macsize[self.algorithm]
+            else:
+                raise RuntimeError('TSIG Mac Size cannot be determined')
+        mac = encode_string('x' * mac_size) if self.mac is None else \
+            encode_string(self.mac, mac_size)
+        other_len = self.other_len
+        if other_len is None:
+            # 18 = BADTIME
+            other_len = 6 if self.error == 18 else 0
+        other_data = self.other_data
+        if other_data is None:
+            # for BADTIME, the "correct" time is reported in Other Data
+            other_data = '%012x' % (self.time_signed + self.fudge + 1) \
+                if self.error == 18 else ''
+        else:
+            other_data = encode_string(self.other_data, other_len)
+        if self.rdlen is None:
+            # 16 = fixed fields: time signed(6) + fudge(2) + mac size(2)
+            #      + original id(2) + error(2) + other len(2)
+            self.rdlen = int(len(name_wire) / 2 + 16 + len(mac) / 2 + \
+                                 len(other_data) / 2)
+        self.dump_header(f, self.rdlen)
+        f.write('# Algorithm=%s Time-Signed=%d Fudge=%d\n' %
+                (self.algorithm, self.time_signed, self.fudge))
+        f.write('%s %012x %04x\n' % (name_wire, self.time_signed, self.fudge))
+        f.write('# MAC Size=%d MAC=(see hex)\n' % mac_size)
+        f.write('%04x%s\n' % (mac_size, ' ' + mac if len(mac) > 0 else ''))
+        f.write('# Original-ID=%d Error=%d\n' % (self.original_id, self.error))
+        f.write('%04x %04x\n' %  (self.original_id, self.error))
+        f.write('# Other-Len=%d Other-Data=(see hex)\n' % other_len)
+        f.write('%04x%s\n' % (other_len,
+                              ' ' + other_data if len(other_data) > 0 else ''))
+
+# Build section-class mapping: spec-file section name -> (renderer class,
+# per-field translation tables passed to get_config()).
+config_param = { 'name' : (Name, {}),
+                 'header' : (DNSHeader, header_xtables),
+                 'question' : (DNSQuestion, question_xtables),
+                 'edns' : (EDNS, {}) }
+for rrtype in dict_rrtype.keys():
+    # For any supported RR types add the tuple of (RR_CLASS, {}).
+    # We expect KeyError as not all the types are supported, and simply
+    # ignore them.
+    try:
+        # look up the class named after the upper-cased type mnemonic
+        # in this module's namespace
+        cur_mod = sys.modules[__name__]
+        config_param[rrtype] = (cur_mod.__dict__[rrtype.upper()], {})
+    except KeyError:
+        pass
+
+def get_config_param(section):
+    '''Look up the (class, xtables) tuple for a spec-file section name.
+
+    A numeric suffix of the form "/N" is stripped first, so multiple
+    sections of the same type (e.g. "a/1", "a/2") can share one entry.
+    Raises KeyError for an unknown section name.
+    '''
+    s = section
+    m = re.match('^([^:]+)/\d+$', section)
+    if m:
+        s = m.group(1)
+    return config_param[s]
+
+usage = '''usage: %prog [options] input_file'''
+
+if __name__ == "__main__":
+    parser = OptionParser(usage=usage)
+    parser.add_option('-o', '--output', action='store', dest='output',
+                      default=None, metavar='FILE',
+                      help='output file name [default: prefix of input_file]')
+    (options, args) = parser.parse_args()
+
+    if len(args) == 0:
+        parser.error('input file is missing')
+    configfile = args[0]
+
+    outputfile = options.output
+    if not outputfile:
+        # derive the output name by stripping the input file's suffix
+        m = re.match('(.*)\.[^.]+$', configfile)
+        if m:
+            outputfile = m.group(1)
+        else:
+            raise ValueError('output file is not specified and input file is not in the form of "output_file.suffix"')
+
+    # NOTE(review): SafeConfigParser was renamed to ConfigParser in
+    # Python 3.2 and the old alias is deprecated (removed in 3.12);
+    # consider switching when the minimum supported version allows.
+    config = configparser.SafeConfigParser()
+    config.read(configfile)
+
+    output = open(outputfile, 'w')
+
+    print_header(output, configfile)
+
+    # First try the 'custom' mode; if it fails assume the query mode.
+    try:
+        sections = config.get('custom', 'sections').split(':')
+    except configparser.NoSectionError:
+        sections = ['header', 'question', 'edns']
+
+    # render each requested section in order
+    for s in sections:
+        section_param = get_config_param(s)
+        (obj, xtables) = (section_param[0](), section_param[1])
+        if get_config(config, s, obj, xtables):
+            obj.dump(output)
+
+    output.close()
diff --git a/src/lib/util/python/pycppwrapper_util.h b/src/lib/util/python/pycppwrapper_util.h
index 3f396e2..462e715 100644
--- a/src/lib/util/python/pycppwrapper_util.h
+++ b/src/lib/util/python/pycppwrapper_util.h
@@ -293,7 +293,7 @@ protected:
 /// \c PyObject_New() to the caller.
 template <typename PYSTRUCT, typename CPPCLASS>
 struct CPPPyObjectContainer : public PyObjectContainer {
-    CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
+    explicit CPPPyObjectContainer(PYSTRUCT* obj) : PyObjectContainer(obj) {}
 
     // This method associates a C++ object with the corresponding python
     // object enclosed in this class.
diff --git a/src/lib/util/python/wrapper_template.cc b/src/lib/util/python/wrapper_template.cc
index a703731..426ced5 100644
--- a/src/lib/util/python/wrapper_template.cc
+++ b/src/lib/util/python/wrapper_template.cc
@@ -299,8 +299,8 @@ initModulePart_ at CPPCLASS@(PyObject* mod) {
 
 PyObject*
 create at CPPCLASS@Object(const @CPPCLASS@& source) {
-    @CPPCLASS at Container container =
-        PyObject_New(s_ at CPPCLASS@, &@cppclass at _type);
+    @CPPCLASS at Container container(PyObject_New(s_ at CPPCLASS@,
+                                               &@cppclass at _type));
     container.set(new @CPPCLASS@(source));
     return (container.release());
 }
diff --git a/src/lib/util/python/wrapper_template.h b/src/lib/util/python/wrapper_template.h
index d68a658..be701e1 100644
--- a/src/lib/util/python/wrapper_template.h
+++ b/src/lib/util/python/wrapper_template.h
@@ -37,15 +37,15 @@ bool initModulePart_ at CPPCLASS@(PyObject* mod);
 // Note: this utility function works only when @CPPCLASS@ is a copy
 // constructable.
 // And, it would only be useful when python binding needs to create this
-// object frequently.  Otherwise, it would (or should) probably better to
+// object frequently.  Otherwise, it would (or should) probably be better to
 // remove the declaration and definition of this function.
 //
-/// This is A simple shortcut to create a python @CPPCLASS@ object (in the
+/// This is a simple shortcut to create a python @CPPCLASS@ object (in the
 /// form of a pointer to PyObject) with minimal exception safety.
 /// On success, it returns a valid pointer to PyObject with a reference
 /// counter of 1; if something goes wrong it throws an exception (it never
 /// returns a NULL pointer).
-/// This function is expected to be called with in a try block
+/// This function is expected to be called within a try block
 /// followed by necessary setup for python exception.
 PyObject* create at CPPCLASS@Object(const @CPPCLASS@& source);
 
diff --git a/src/lib/util/pyunittests/Makefile.am b/src/lib/util/pyunittests/Makefile.am
index 63ccf2a..dd2d39a 100644
--- a/src/lib/util/pyunittests/Makefile.am
+++ b/src/lib/util/pyunittests/Makefile.am
@@ -2,7 +2,8 @@ AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-pyexec_LTLIBRARIES = pyunittests_util.la
+noinst_LTLIBRARIES = pyunittests_util.la
+
 pyunittests_util_la_SOURCES = pyunittests_util.cc
 pyunittests_util_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 pyunittests_util_la_LDFLAGS = $(PYTHON_LDFLAGS)
@@ -15,3 +16,7 @@ pyunittests_util_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 pyunittests_util_la_LDFLAGS += -module
 pyunittests_util_la_LIBADD = $(top_builddir)/src/lib/util/libutil.la
 pyunittests_util_la_LIBADD += $(PYTHON_LIB)
+
+# hack to trigger libtool to not create a convenience archive,
+# resulting in shared modules
+pyunittests_util_la_LDFLAGS += -rpath /nowhere
diff --git a/src/lib/util/strutil.cc b/src/lib/util/strutil.cc
index 161f9ac..ed7fc9b 100644
--- a/src/lib/util/strutil.cc
+++ b/src/lib/util/strutil.cc
@@ -132,6 +132,17 @@ format(const std::string& format, const std::vector<std::string>& args) {
     return (result);
 }
 
+std::string
+getToken(std::istringstream& iss) {
+    string token;
+    iss >> token;
+    if (iss.bad() || iss.fail()) {
+        isc_throw(StringTokenError, "could not read token from string");
+    }
+    return (token);
+}
+
+
 } // namespace str
 } // namespace util
 } // namespace isc
diff --git a/src/lib/util/strutil.h b/src/lib/util/strutil.h
index e044c15..021c236 100644
--- a/src/lib/util/strutil.h
+++ b/src/lib/util/strutil.h
@@ -18,7 +18,10 @@
 #include <algorithm>
 #include <cctype>
 #include <string>
+#include <sstream>
 #include <vector>
+#include <exceptions/exceptions.h>
+#include <boost/lexical_cast.hpp>
 
 namespace isc {
 namespace util {
@@ -26,6 +29,16 @@ namespace str {
 
 /// \brief A Set of C++ Utilities for Manipulating Strings
 
+///
+/// \brief A standard string util exception that is thrown if getToken or
+/// numToToken are called with bad input data
+///
+class StringTokenError : public Exception {
+public:
+    StringTokenError(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
 /// \brief Normalize Backslash
 ///
 /// Only relevant to Windows, this replaces all "\" in a string with "/" and
@@ -140,6 +153,55 @@ std::string format(const std::string& format,
     const std::vector<std::string>& args);
 
 
+/// \brief Returns one token from the given stringstream
+///
+/// Using the >> operator, with basic error checking
+///
+/// \exception StringTokenError if the token cannot be read from the stream
+///
+/// \param iss stringstream to read one token from
+///
+/// \return the first token read from the stringstream
+std::string getToken(std::istringstream& iss);
+
+/// \brief Converts a string token to an *unsigned* integer.
+///
+/// The value is converted using a lexical cast, with error and bounds
+/// checking.
+///
+/// NumType is a *signed* integral type (e.g. int32_t) that is sufficiently
+/// wide to store resulting integers.
+///
+/// BitSize is the maximum number of bits that the resulting integer can take.
+/// This function first checks whether the given token can be converted to
+/// an integer of NumType type.  It then confirms the conversion result is
+/// within the valid range, i.e., [0, 2^BitSize - 1].  The second check is
+/// necessary because lexical_cast<T> where T is an unsigned integer type
+/// doesn't correctly reject negative numbers when compiled with SunStudio.
+///
+/// \exception StringTokenError if the value is out of range, or if it
+///            could not be converted
+///
+/// \param num_token the string token to convert
+///
+/// \return the converted value, of type NumType
+template <typename NumType, int BitSize>
+NumType
+tokenToNum(const std::string& num_token) {
+    NumType num;
+    try {
+        num = boost::lexical_cast<NumType>(num_token);
+    } catch (const boost::bad_lexical_cast& ex) {
+        isc_throw(StringTokenError, "Invalid SRV numeric parameter: " <<
+                  num_token);
+    }
+    if (num < 0 || num >= (static_cast<NumType>(1) << BitSize)) {
+        isc_throw(StringTokenError, "Numeric SRV parameter out of range: " <<
+                  num);
+    }
+    return (num);
+}
+
 } // namespace str
 } // namespace util
 } // namespace isc
diff --git a/src/lib/util/tests/buffer_unittest.cc b/src/lib/util/tests/buffer_unittest.cc
index 0cd1823..666924e 100644
--- a/src/lib/util/tests/buffer_unittest.cc
+++ b/src/lib/util/tests/buffer_unittest.cc
@@ -239,4 +239,36 @@ TEST_F(BufferTest, outputBufferZeroSize) {
     });
 }
 
+TEST_F(BufferTest, readVectorAll) {
+    std::vector<uint8_t> vec;
+
+    // check that vector can read the whole buffer
+    ibuffer.readVector(vec, 5);
+
+    ASSERT_EQ(5, vec.size());
+    EXPECT_EQ(0, memcmp(&vec[0], testdata, 5));
+
+    // ibuffer is 5 bytes long. Can't read past it.
+    EXPECT_THROW(
+        ibuffer.readVector(vec, 1),
+        isc::util::InvalidBufferPosition
+    );
+}
+
+TEST_F(BufferTest, readVectorChunks) {
+    std::vector<uint8_t> vec;
+
+    // check that the buffer can be read in multiple chunks
+    ibuffer.readVector(vec, 3);
+    EXPECT_EQ(3, vec.size());
+
+    EXPECT_EQ(0, memcmp(&vec[0], testdata, 3));
+
+    EXPECT_NO_THROW(
+        ibuffer.readVector(vec, 2)
+    );
+
+    EXPECT_EQ(0, memcmp(&vec[0], testdata+3, 2));
+}
+
 }
diff --git a/src/lib/util/tests/filename_unittest.cc b/src/lib/util/tests/filename_unittest.cc
index be29ff1..07f3525 100644
--- a/src/lib/util/tests/filename_unittest.cc
+++ b/src/lib/util/tests/filename_unittest.cc
@@ -51,42 +51,49 @@ TEST_F(FilenameTest, Components) {
     EXPECT_EQ("/alpha/beta/", fname.directory());
     EXPECT_EQ("gamma", fname.name());
     EXPECT_EQ(".delta", fname.extension());
+    EXPECT_EQ("gamma.delta", fname.nameAndExtension());
 
     // Directory only
     fname.setName("/gamma/delta/");
     EXPECT_EQ("/gamma/delta/", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("", fname.nameAndExtension());
 
     // Filename only
     fname.setName("epsilon");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("epsilon", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("epsilon", fname.nameAndExtension());
 
     // Extension only
     fname.setName(".zeta");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ(".zeta", fname.extension());
+    EXPECT_EQ(".zeta", fname.nameAndExtension());
 
     // Missing directory
     fname.setName("eta.theta");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("eta", fname.name());
     EXPECT_EQ(".theta", fname.extension());
+    EXPECT_EQ("eta.theta", fname.nameAndExtension());
 
     // Missing filename
     fname.setName("/iota/.kappa");
     EXPECT_EQ("/iota/", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ(".kappa", fname.extension());
+    EXPECT_EQ(".kappa", fname.nameAndExtension());
 
     // Missing extension
     fname.setName("lambda/mu/nu");
     EXPECT_EQ("lambda/mu/", fname.directory());
     EXPECT_EQ("nu", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("nu", fname.nameAndExtension());
 
     // Check that the decomposition can occur in the presence of leading and
     // trailing spaces
@@ -94,18 +101,21 @@ TEST_F(FilenameTest, Components) {
     EXPECT_EQ("lambda/mu/", fname.directory());
     EXPECT_EQ("nu", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("nu", fname.nameAndExtension());
 
     // Empty string
     fname.setName("");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("", fname.nameAndExtension());
 
     // ... and just spaces
     fname.setName("  ");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("", fname.nameAndExtension());
 
     // Check corner cases - where separators are present, but strings are
     // absent.
@@ -113,16 +123,19 @@ TEST_F(FilenameTest, Components) {
     EXPECT_EQ("/", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ("", fname.extension());
+    EXPECT_EQ("", fname.nameAndExtension());
 
     fname.setName(".");
     EXPECT_EQ("", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ(".", fname.extension());
+    EXPECT_EQ(".", fname.nameAndExtension());
 
     fname.setName("/.");
     EXPECT_EQ("/", fname.directory());
     EXPECT_EQ("", fname.name());
     EXPECT_EQ(".", fname.extension());
+    EXPECT_EQ(".", fname.nameAndExtension());
 
     // Note that the space is a valid filename here; only leading and trailing
     // spaces should be trimmed.
@@ -130,11 +143,13 @@ TEST_F(FilenameTest, Components) {
     EXPECT_EQ("/", fname.directory());
     EXPECT_EQ(" ", fname.name());
     EXPECT_EQ(".", fname.extension());
+    EXPECT_EQ(" .", fname.nameAndExtension());
 
     fname.setName(" / . ");
     EXPECT_EQ("/", fname.directory());
     EXPECT_EQ(" ", fname.name());
     EXPECT_EQ(".", fname.extension());
+    EXPECT_EQ(" .", fname.nameAndExtension());
 }
 
 // Check that the expansion with a default works.
diff --git a/src/lib/util/tests/io_utilities_unittest.cc b/src/lib/util/tests/io_utilities_unittest.cc
index 4aad560..4293c7e 100644
--- a/src/lib/util/tests/io_utilities_unittest.cc
+++ b/src/lib/util/tests/io_utilities_unittest.cc
@@ -19,6 +19,7 @@
 
 #include <cstddef>
 
+#include <arpa/inet.h>
 #include <gtest/gtest.h>
 
 #include <util/buffer.h>
@@ -71,3 +72,48 @@ TEST(asioutil, writeUint16) {
         EXPECT_EQ(ref[1], test[1]);
     }
 }
+
+// test data shared among the readUint32 and writeUint32 tests
+const static uint32_t test32[] = {
+    0,
+    1,
+    2000,
+    0x80000000,
+    0xffffffff
+};
+
+TEST(asioutil, readUint32) {
+    uint8_t data[8];
+
+    // make sure that we can read data, regardless of
+    // the memory alignment. That's why we need to repeat
+    // it 4 times.
+    for (int offset=0; offset < 4; offset++) {
+        for (int i=0; i < sizeof(test32)/sizeof(uint32_t); i++) {
+            uint32_t tmp = htonl(test32[i]);
+            memcpy(&data[offset], &tmp, sizeof(uint32_t));
+
+            EXPECT_EQ(test32[i], readUint32(&data[offset]));
+        }
+    }
+}
+
+
+TEST(asioutil, writeUint32) {
+    uint8_t data[8];
+
+    // make sure that we can write data, regardless of
+    // the memory alignment. That's why we need to repeat
+    // it 4 times.
+    for (int offset=0; offset < 4; offset++) {
+        for (int i=0; i < sizeof(test32)/sizeof(uint32_t); i++) {
+            uint8_t* ptr = writeUint32(test32[i], &data[offset]);
+
+            EXPECT_EQ(&data[offset]+sizeof(uint32_t), ptr);
+
+            uint32_t tmp = htonl(test32[i]);
+
+            EXPECT_EQ(0, memcmp(&tmp, &data[offset], sizeof(uint32_t)));
+        }
+    }
+}
diff --git a/src/lib/util/tests/strutil_unittest.cc b/src/lib/util/tests/strutil_unittest.cc
index cd3a9ca..74bc17d 100644
--- a/src/lib/util/tests/strutil_unittest.cc
+++ b/src/lib/util/tests/strutil_unittest.cc
@@ -12,6 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <stdint.h>
+
 #include <string>
 
 #include <gtest/gtest.h>
@@ -22,17 +24,9 @@ using namespace isc;
 using namespace isc::util;
 using namespace std;
 
-class StringUtilTest : public ::testing::Test {
-protected:
-    StringUtilTest()
-    {
-    }
-};
-
-
 // Check for slash replacement
 
-TEST_F(StringUtilTest, Slash) {
+TEST(StringUtilTest, Slash) {
 
     string instring = "";
     isc::util::str::normalizeSlash(instring);
@@ -49,7 +43,7 @@ TEST_F(StringUtilTest, Slash) {
 
 // Check that leading and trailing space trimming works
 
-TEST_F(StringUtilTest, Trim) {
+TEST(StringUtilTest, Trim) {
 
     // Empty and full string.
     EXPECT_EQ("", isc::util::str::trim(""));
@@ -71,7 +65,7 @@ TEST_F(StringUtilTest, Trim) {
 // returned vector; if not as expected, the following references may be invalid
 // so should not be used.
 
-TEST_F(StringUtilTest, Tokens) {
+TEST(StringUtilTest, Tokens) {
     vector<string>  result;
 
     // Default delimiters
@@ -157,7 +151,7 @@ TEST_F(StringUtilTest, Tokens) {
 
 // Changing case
 
-TEST_F(StringUtilTest, ChangeCase) {
+TEST(StringUtilTest, ChangeCase) {
     string mixed("abcDEFghiJKLmno123[]{=+--+]}");
     string upper("ABCDEFGHIJKLMNO123[]{=+--+]}");
     string lower("abcdefghijklmno123[]{=+--+]}");
@@ -173,7 +167,7 @@ TEST_F(StringUtilTest, ChangeCase) {
 
 // Formatting
 
-TEST_F(StringUtilTest, Formatting) {
+TEST(StringUtilTest, Formatting) {
 
     vector<string> args;
     args.push_back("arg1");
@@ -213,3 +207,63 @@ TEST_F(StringUtilTest, Formatting) {
     string format9 = "%s %s";
     EXPECT_EQ(format9, isc::util::str::format(format9, args));
 }
+
+TEST(StringUtilTest, getToken) {
+    string s("a b c");
+    istringstream ss(s);
+    EXPECT_EQ("a", isc::util::str::getToken(ss));
+    EXPECT_EQ("b", isc::util::str::getToken(ss));
+    EXPECT_EQ("c", isc::util::str::getToken(ss));
+    EXPECT_THROW(isc::util::str::getToken(ss), isc::util::str::StringTokenError);
+}
+
+int32_t tokenToNumCall_32_16(const string& token) {
+    return isc::util::str::tokenToNum<int32_t, 16>(token);
+}
+
+int16_t tokenToNumCall_16_8(const string& token) {
+    return isc::util::str::tokenToNum<int16_t, 8>(token);
+}
+
+TEST(StringUtilTest, tokenToNum) {
+    uint32_t num32 = tokenToNumCall_32_16("0");
+    EXPECT_EQ(0, num32);
+    num32 = tokenToNumCall_32_16("123");
+    EXPECT_EQ(123, num32);
+    num32 = tokenToNumCall_32_16("65535");
+    EXPECT_EQ(65535, num32);
+
+    EXPECT_THROW(tokenToNumCall_32_16(""),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("a"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("-1"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("65536"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("1234567890"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_32_16("-1234567890"),
+                 isc::util::str::StringTokenError);
+
+    uint16_t num16 = tokenToNumCall_16_8("123");
+    EXPECT_EQ(123, num16);
+    num16 = tokenToNumCall_16_8("0");
+    EXPECT_EQ(0, num16);
+    num16 = tokenToNumCall_16_8("255");
+    EXPECT_EQ(255, num16);
+
+    EXPECT_THROW(tokenToNumCall_16_8(""),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("a"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("-1"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("256"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("1234567890"),
+                 isc::util::str::StringTokenError);
+    EXPECT_THROW(tokenToNumCall_16_8("-1234567890"),
+                 isc::util::str::StringTokenError);
+
+}
diff --git a/src/lib/util/unittests/Makefile.am b/src/lib/util/unittests/Makefile.am
index 83235f2..bbb0d49 100644
--- a/src/lib/util/unittests/Makefile.am
+++ b/src/lib/util/unittests/Makefile.am
@@ -1,7 +1,7 @@
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
-lib_LTLIBRARIES = libutil_unittests.la
+noinst_LTLIBRARIES = libutil_unittests.la
 libutil_unittests_la_SOURCES = fork.h fork.cc resolver.h
 libutil_unittests_la_SOURCES += newhook.h newhook.cc
 libutil_unittests_la_SOURCES += testdata.h testdata.cc
diff --git a/tests/lettuce/README b/tests/lettuce/README
new file mode 100644
index 0000000..21a57c7
--- /dev/null
+++ b/tests/lettuce/README
@@ -0,0 +1,127 @@
+BIND10 system testing with Lettuce
+or: to BDD or not to BDD
+
+In this directory, we define a set of behavioral tests for BIND 10. Currently,
+these tests are specific for BIND10, but we are keeping in mind that RFC-related
+tests could be separated, so that we can test other systems as well.
+
+Prerequisites:
+- Installed version of BIND 10 (but see below how to run it from source tree)
+- dig
+- lettuce (http://lettuce.it)
+
+To install lettuce, if you have the python pip installation tool, simply do
+pip install lettuce
+See http://lettuce.it/intro/install.html
+
+Most systems have the pip tool in a separate package; on Debian-based systems
+it is called python-pip. On FreeBSD the port is devel/py-pip.
+
+Running the tests
+-----------------
+
+At this moment, we have a fixed port for local tests in our setups, port 47806.
+This port must be free. (TODO: can we make this run-time discovered?).
+Port 47805 is used for cmdctl, and must also be available.
+(note, we will need to extend this to a range, or if possible, we will need to
+do some on-the-fly available port finding)
+
+The bind10 main program, bindctl, and dig must all be in the default search 
+path of your environment, and BIND 10 must not be running if you use the 
+installed version when you run the tests.
+
+If you want to test an installed version of bind 10, just run 'lettuce' in
+this directory.
+
+We have provided a script that sets up the shell environment to run the tests
+with the build tree version of bind. If your shell uses export to set
+environment variables, you can source the script setup_intree_bind10.sh, then
+run lettuce.
+
+Due to the default way lettuce prints its output, it is advisable to run it
+in a terminal that is wider than the default. If you see a lot of lines twice
+in different colors, the terminal is not wide enough.
+
+If you just want to run one specific feature test, use
+lettuce features/<feature file>
+
+To run a specific scenario from a feature, use
+lettuce features/<feature file> -s <scenario number>
+
+We have set up the tests to assume that lettuce is run from this directory,
+so even if you specify a specific feature file, you should do it from this
+directory.
+
+What to do when a test fails
+----------------------------
+
+First of all, look at the error it printed and see what step it occurred in.
+If written well, the output should explain most of what went wrong.
+
+The stacktrace that is printed is *not* of bind10, but of the testing
+framework; this helps in finding more information about what exactly the test
+tried to achieve when it failed (as well as help debug the tests themselves).
+
+Furthermore, if any scenario fails, the output from long-running processes
+will be stored in the directory output/. The name of the files will be
+<Feature name>-<Scenario name>-<Process name>.stdout and
+<Feature name>-<Scenario name>-<Process name>.stderr
+Where spaces and other non-standard characters are replaced by an underscore.
+The process name is either the standard name for said process (e.g. 'bind10'),
+or the name given to it by the test ('when i run bind10 as <name>').
+
+These files *will* be overwritten or deleted if the same scenarios are run
+again, so if you want to inspect them after a failed test, either do so
+immediately or move the files.
+
+Adding and extending tests
+--------------------------
+
+If you want to add tests, it is advisable to first go through the examples to
+see what is possible, and read the documentation on http://www.lettuce.it
+
+There is also a README.tutorial file here.
+
+We have a couple of conventions to keep things manageable.
+
+Configuration files go into the configurations/ directory.
+Data files go into the data/ directory.
+Step definitions go into the features/terrain/ directory (the name terrain is 
+chosen for the same reason Lettuce chose terrain.py, this is the place the 
+tests 'live' in).
+Feature definitions go directly into the features/ directory.
+
+These directories are currently not divided further; we may want to consider 
+this as the set grows. Due to a (current?) limitation of Lettuce, for 
+feature files this is currently not possible; the python files containing 
+steps and terrain must be below or at the same level of the feature files.
+
+Long-running processes should be started through the world.RunningProcesses
+instance. If you want to add a process (e.g. bind9), create start, stop and
+control steps in terrain/<base_name>_control.py, and let it use the
+RunningProcesses API (defined in terrain.py). See bind10_control.py for an
+example.
+
+For sending queries and checking the results, steps have been defined in
+terrain/querying.py. These use dig and store the results split up into text
+strings. This is intentionally not parsed through our own library (as that way
+we might run into a 'symmetric bug'). If you need something more advanced from
+query results, define it here.
+
+Some very general steps are defined in terrain/steps.py.
+Initialization code, cleanup code, and helper classes are defined in
+terrain/terrain.py.
+
+To find the right steps, case insensitive matching is used. Parameters taken
+from the steps are case-sensitive though. So a step defined as
+'do foo with value (bar)' will be matched when using
+'Do Foo with value xyz', but xyz will be taken as given.
+
+If you need to add steps that are very particular to one test, create a new 
+file with a name relevant for that test in terrain. We may want to consider 
+creating a specific subdirectory for these, but at this moment it is unclear 
+whether we need to.
+
+We should try to keep steps as general as possible, while not making them too
+complex and error-prone.
+
diff --git a/tests/lettuce/README.tutorial b/tests/lettuce/README.tutorial
new file mode 100644
index 0000000..18c94cf
--- /dev/null
+++ b/tests/lettuce/README.tutorial
@@ -0,0 +1,157 @@
+Quick tutorial and overview
+---------------------------
+
+Lettuce is a framework for doing Behaviour Driven Development (BDD).
+
+The idea behind BDD is that you first write down your requirements in
+the form of scenarios, then implement their behaviour.
+
+We do not plan on doing full BDD, but such a system should also help
+us make system tests. And, hopefully, being able to better identify
+what exactly is going wrong when a test fails.
+
+Lettuce is a python implementation of the Cucumber framework, which is
+a ruby system. So far we chose lettuce because we already need python
+anyway, so chances are higher that any system we want to run it on
+supports it. It only supports a subset of cucumber, but more cucumber
+features are planned. As I do not know much details of cucumber, I
+can't really say what is there and what is not.
+
+A slight letdown is that the current version does not support python 3.
+However, as long as the tool-calling glue is python2, this should not
+cause any problems, since these aren't unit tests; We do not plan to use
+our libraries directly, but only through the runnable scripts and
+executables.
+
+-----
+
+Features, Scenarios, Steps.
+
+Lettuce makes a distinction between features, scenarios, and steps.
+
+Features are general, well, features. Each 'feature' has its own file
+ending in .feature. A feature file contains a description and a number
+of scenarios. Each scenario tests one or more particular parts of the
+feature. Each scenario consists of a number of steps.
+
+So let's open up a simple one.
+
+-- example.feature
+Feature: showing off BIND 10
+    This is to show BIND 10 running and that it answer queries
+
+    Scenario: Starting bind10
+        # steps go here
+--
+
+I have predefined a number of steps we can use, as we build tests we
+will need to expand these, but we will look at them shortly.
+
+This file defines a feature, just under the feature name we can
+provide a description of the feature.
+
+The one scenario we have has no steps, so if we run it we should
+see something like:
+
+-- output
+> lettuce
+Feature: showing off BIND 10
+  This is to show BIND 10 running and that it answer queries
+
+  Scenario: Starting bind10
+
+1 feature (1 passed)
+1 scenario (1 passed)
+0 step (0 passed)
+--
+
+Let's first add some steps that send queries.
+
+--
+        A query for www.example.com should have rcode REFUSED
+        A query for www.example.org should have rcode NOERROR
+--
+
+Since we didn't start any bind10, dig will time out and the result
+should be an error saying it got no answer. Errors are in the
+form of stack traces (triggered by failed assertions), so we can find
+out easily where in the tests they occurred. Especially when the total
+set of steps gets bigger we might need that.
+
+So let's add a step that starts bind10.
+
+--
+        When I start bind10 with configuration example.org.config
+--
+
+This is not good enough; it will fire off the process, but setting up
+b10-auth may take a few moments, so we need to add a step to wait for
+it to be started before we continue.
+
+--
+        Then wait for bind10 auth to start
+--
+
+And let's run the tests again.
+
+--
+> lettuce
+
+Feature: showing off BIND 10
+  This is to show BIND 10 running and that it answer queries
+
+  Scenario: Starting bind10
+    When I start bind10 with configuration example.org.config
+    Then wait for bind10 auth to start
+    A query for www.example.com should have rcode REFUSED
+    A query for www.example.org should have rcode NOERROR
+
+1 feature (1 passed)
+1 scenario (1 passed)
+4 steps (4 passed)
+(finished within 2 seconds)
+--
+
+So take a look at one of those steps, let's pick the first one.
+
+A step is defined through a python decorator, which in essence is a regular
+expression; lettuce searches through all defined steps to find one that
+matches. These are 'partial' matches (unless specified otherwise in the
+regular expression itself), so if the step is defined with "do foo bar", the
+scenario can add words for readability "When I do foo bar".
+
+Each captured group will be passed as an argument to the function we define.
+For bind10, I defined a configuration file, a cmdctl port, and a process
+name. The first two should be self-evident, and the process name is an
+optional name we give it, should we want to address it in the rest of the
+tests. This is most useful if we want to start multiple instances. In the
+next step (the wait for auth to start), I added a 'of <instance>'. So if we 
+define the bind10 'as b10_second_instance', we can specify that one here as 
+'of b10_second_instance'.
+
+--
+        When I start bind10 with configuration second.config
+        with cmdctl port 12345 as b10_second_instance
+--
+(line wrapped for readability)
+
+But notice how we needed two steps, which we probably always need (but
+not entirely always)? We can also combine steps; for instance:
+
+--
+ at step('have bind10 running(?: with configuration ([\w.]+))?')
+def have_bind10_running(step, config_file):
+    step.given('start bind10 with configuration ' + config_file)
+    step.given('wait for bind10 auth to start')
+--
+
+Now we can replace the two steps with one:
+
+--
+    Given I have bind10 running
+--
+
+That's it for the quick overview. For some more examples, with comments, 
+take a look at features/example.feature. You can read more about lettuce and
+its features on http://www.lettuce.it, and if you plan on adding tests and
+scenarios, please consult the last section of the main README first.
diff --git a/tests/lettuce/configurations/example.org.config.orig b/tests/lettuce/configurations/example.org.config.orig
new file mode 100644
index 0000000..642f2dd
--- /dev/null
+++ b/tests/lettuce/configurations/example.org.config.orig
@@ -0,0 +1,17 @@
+{
+    "version": 2,
+    "Logging": {
+        "loggers": [ {
+            "debuglevel": 99,
+            "severity": "DEBUG",
+            "name": "auth"
+        } ]
+    },
+    "Auth": {
+        "database_file": "data/example.org.sqlite3",
+        "listen_on": [ {
+            "port": 47806,
+            "address": "127.0.0.1"
+        } ]
+    }
+}
diff --git a/tests/lettuce/configurations/example2.org.config b/tests/lettuce/configurations/example2.org.config
new file mode 100644
index 0000000..1a40d1b
--- /dev/null
+++ b/tests/lettuce/configurations/example2.org.config
@@ -0,0 +1,18 @@
+{
+    "version": 2,
+    "Logging": {
+        "loggers": [ {
+            "severity": "DEBUG",
+            "name": "auth",
+            "debuglevel": 99
+        }
+        ]
+    },
+    "Auth": {
+        "database_file": "data/example.org.sqlite3",
+        "listen_on": [ {
+            "port": 47807,
+            "address": "127.0.0.1"
+        } ]
+    }
+}
diff --git a/tests/lettuce/configurations/no_db_file.config b/tests/lettuce/configurations/no_db_file.config
new file mode 100644
index 0000000..f865354
--- /dev/null
+++ b/tests/lettuce/configurations/no_db_file.config
@@ -0,0 +1,10 @@
+{
+    "version": 2,
+    "Auth": {
+        "database_file": "data/test_nonexistent_db.sqlite3",
+        "listen_on": [ {
+            "port": 47806,
+            "address": "127.0.0.1"
+        } ]
+    }
+}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_master.conf b/tests/lettuce/configurations/xfrin/retransfer_master.conf
new file mode 100644
index 0000000..95cd88e
--- /dev/null
+++ b/tests/lettuce/configurations/xfrin/retransfer_master.conf
@@ -0,0 +1,22 @@
+{
+    "version": 2,
+    "Logging": {
+        "loggers": [ {
+            "debuglevel": 99,
+            "severity": "DEBUG",
+            "name": "auth"
+        } ]
+    },
+    "Auth": {
+        "database_file": "data/example.org.sqlite3",
+        "listen_on": [ {
+            "port": 47807,
+            "address": "127.0.0.1"
+        } ]
+    },
+    "Xfrout": {
+        "zone_config": [ {
+            "origin": "example.org"
+        } ]
+    }
+}
diff --git a/tests/lettuce/configurations/xfrin/retransfer_slave.conf b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
new file mode 100644
index 0000000..51622cd
--- /dev/null
+++ b/tests/lettuce/configurations/xfrin/retransfer_slave.conf
@@ -0,0 +1,17 @@
+{
+    "version": 2,
+    "Logging": {
+        "loggers": [ {
+            "debuglevel": 99,
+            "severity": "DEBUG",
+            "name": "auth"
+        } ]
+    },
+    "Auth": {
+        "database_file": "data/test_nonexistent_db.sqlite3",
+        "listen_on": [ {
+            "port": 47806,
+            "address": "127.0.0.1"
+        } ]
+    }
+}
diff --git a/tests/lettuce/data/empty_db.sqlite3 b/tests/lettuce/data/empty_db.sqlite3
new file mode 100644
index 0000000..f27a8b8
Binary files /dev/null and b/tests/lettuce/data/empty_db.sqlite3 differ
diff --git a/tests/lettuce/data/example.org.sqlite3 b/tests/lettuce/data/example.org.sqlite3
new file mode 100644
index 0000000..070012f
Binary files /dev/null and b/tests/lettuce/data/example.org.sqlite3 differ
diff --git a/tests/lettuce/features/example.feature b/tests/lettuce/features/example.feature
new file mode 100644
index 0000000..d1ed6b3
--- /dev/null
+++ b/tests/lettuce/features/example.feature
@@ -0,0 +1,142 @@
+Feature: Example feature
+    This is an example Feature set. It is mainly intended to show
+    our use of the lettuce tool and our own framework for it
+    The first scenario is to show what a simple test would look like, and
+    is intentionally uncommented.
+    The later scenarios have comments to show what the test steps do and
+    support
+    
+    Scenario: A simple example
+        Given I have bind10 running with configuration example.org.config
+        A query for www.example.org should have rcode NOERROR
+        A query for www.doesnotexist.org should have rcode REFUSED
+        The SOA serial for example.org should be 1234
+
+    Scenario: New database
+        # This test checks whether a database file is automatically created
+        # Under the hood, we take advantage of our initialization routines so
+        # that we are sure this file does not exist, see
+        # features/terrain/terrain.py
+        
+        # Standard check to test (non-)existence of a file
+        # This file is actually automatically removed by the initialization routines
+        The file data/test_nonexistent_db.sqlite3 should not exist
+
+        # In the first scenario, we used 'given I have bind10 running', which
+        # is actually a compound step consisting of the following two
+        # one to start the server
+        When I start bind10 with configuration no_db_file.config
+        # And one to wait until it reports that b10-auth has started
+        Then wait for bind10 auth to start
+
+        # This is a general step to stop a named process. By convention,
+        # the default name for any process is the same as the one we
+        # use in the start step (for bind 10, that is 'I start bind10 with')
+        # See scenario 'Multiple instances' for more.
+        Then stop process bind10
+        
+        # Now we use the first step again to see if the file has been created
+        The file data/test_nonexistent_db.sqlite3 should exist
+
+    Scenario: example.org queries
+        # This scenario performs a number of queries and inspects the results
+        # Simple queries have already been shown, but after we have sent a query,
+        # we can also do more extensive checks on the result.
+        # See querying.py for more information on these steps.
+        
+        # note: lettuce can group similar checks by using tables, but we
+        # intentionally do not make use of that here
+
+        # This is a compound statement that starts and waits for the
+        # started message
+        Given I have bind10 running with configuration example.org.config
+
+        # Some simple queries that are not examined further
+        A query for www.example.com should have rcode REFUSED
+        A query for www.example.org should have rcode NOERROR
+
+        # A query where we look at some of the result properties
+        A query for www.example.org should have rcode NOERROR
+        The last query response should have qdcount 1
+        The last query response should have ancount 1
+        The last query response should have nscount 3
+        The last query response should have adcount 0
+        # The answer section can be inspected in its entirety; in the future
+        # we may add more granular inspection steps
+        The answer section of the last query response should be
+        """
+        www.example.org.   3600    IN    A      192.0.2.1
+        """
+
+        A query for example.org type NS should have rcode NOERROR
+        The answer section of the last query response should be
+        """
+        example.org. 3600 IN NS ns1.example.org.
+        example.org. 3600 IN NS ns2.example.org.
+        example.org. 3600 IN NS ns3.example.org.
+        """
+
+        # We have a specific step for checking SOA serial numbers
+        The SOA serial for example.org should be 1234
+
+        # Another query where we look at some of the result properties
+        A query for doesnotexist.example.org should have rcode NXDOMAIN
+        The last query response should have qdcount 1
+        The last query response should have ancount 0
+        The last query response should have nscount 1
+        The last query response should have adcount 0
+        # When checking flags, we must pass them exactly as they appear in
+        # the output of dig.
+        The last query response should have flags qr aa rd
+
+        A query for www.example.org type TXT should have rcode NOERROR
+        The last query response should have ancount 0
+
+        # Some queries where we specify more details about what to send and
+        # where
+        A query for www.example.org class CH should have rcode REFUSED
+        A query for www.example.org to 127.0.0.1 should have rcode NOERROR
+        A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+        A query for www.example.org type A class IN to 127.0.0.1:47806 should have rcode NOERROR
+
+    Scenario: changing database
+        # This scenario contains a lot of 'wait for' steps
+        # If those are not present, the asynchronous nature of the application
+        # can cause some of the things we send to be handled out of order;
+        # for instance auth could still be serving the old zone when we send
+        # the new query, or already respond from the new database.
+        # Therefore we wait for specific log messages after each operation
+        #
+        # This scenario outlines every single step, and does not use
+        # 'steps of steps' (e.g. Given I have bind10 running)
+        # We can do that but as an example this is probably better to learn
+        # the system
+
+        When I start bind10 with configuration example.org.config
+        Then wait for bind10 auth to start
+        Wait for bind10 stderr message CMDCTL_STARTED
+        A query for www.example.org should have rcode NOERROR
+        Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+        Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+        And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+        A query for www.example.org should have rcode REFUSED
+        Wait for new bind10 stderr message AUTH_SEND_NORMAL_RESPONSE
+        Then set bind10 configuration Auth/database_file to data/example.org.sqlite3
+        And wait for new bind10 stderr message DATASRC_SQLITE_OPEN
+        A query for www.example.org should have rcode NOERROR
+
+    Scenario: two bind10 instances
+        # This is more a test of the test system, start 2 bind10's
+        When I start bind10 with configuration example.org.config as bind10_one
+        And I start bind10 with configuration example2.org.config with cmdctl port 47804 as bind10_two
+
+        Then wait for bind10 auth of bind10_one to start
+        Then wait for bind10 auth of bind10_two to start
+        A query for www.example.org to 127.0.0.1:47806 should have rcode NOERROR
+        A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
+
+        Then set bind10 configuration Auth/database_file to data/empty_db.sqlite3
+        And wait for bind10_one stderr message DATASRC_SQLITE_OPEN
+
+        A query for www.example.org to 127.0.0.1:47806 should have rcode REFUSED
+        A query for www.example.org to 127.0.0.1:47807 should have rcode NOERROR
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
new file mode 100644
index 0000000..5248316
--- /dev/null
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -0,0 +1,144 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from lettuce import *
+import subprocess
+import re
+
+ at step('start bind10(?: with configuration (\S+))?' +\
+      '(?: with cmdctl port (\d+))?' +\
+      '(?: with msgq socket file (\S+))?' +\
+      '(?: as (\S+))?')
+def start_bind10(step, config_file, cmdctl_port, msgq_sockfile, process_name):
+    """
+    Start BIND 10 with the given optional config file, cmdctl port, and
+    store the running process in world with the given process name.
+    Parameters:
+    config_file ('with configuration <file>', optional): this configuration
+                will be used. The path is relative to the base lettuce
+                directory.
+    cmdctl_port ('with cmdctl port <portnr>', optional): The port on which
+                b10-cmdctl listens for bindctl commands. Defaults to 47805.
+    msgq_sockfile ('with msgq socket file', optional): The msgq socket file
+                that will be used for internal communication
+    process_name ('as <name>', optional). This is the name that can be used
+                 in the following steps of the scenario to refer to this
+                 BIND 10 instance. Defaults to 'bind10'.
+    This call will block until BIND10_STARTUP_COMPLETE or BIND10_STARTUP_ERROR
+    is logged. In the case of the latter, or if it times out, the step (and
+    scenario) will fail.
+    It will also fail if there is a running process with the given process_name
+    already.
+    """
+    args = [ 'bind10', '-v' ]
+    if config_file is not None:
+        args.append('-p')
+        args.append("configurations/")
+        args.append('-c')
+        args.append(config_file)
+    if cmdctl_port is None:
+        args.append('--cmdctl-port=47805')
+    else:
+        args.append('--cmdctl-port=' + cmdctl_port)
+    if process_name is None:
+        process_name = "bind10"
+    else:
+        args.append('-m')
+        args.append(process_name + '_msgq.socket')
+
+    world.processes.add_process(step, process_name, args)
+
+    # check output to know when startup has been completed
+    (message, line) = world.processes.wait_for_stderr_str(process_name,
+                                                     ["BIND10_STARTUP_COMPLETE",
+                                                      "BIND10_STARTUP_ERROR"])
+    assert message == "BIND10_STARTUP_COMPLETE", "Got: " + str(line)
+
+ at step('wait for bind10 auth (?:of (\w+) )?to start')
+def wait_for_auth(step, process_name):
+    """Wait for b10-auth to run. This is done by blocking until the message
+       AUTH_SERVER_STARTED is logged.
+       Parameters:
+       process_name ('of <name>', optional): The name of the BIND 10 instance
+                    to wait for. Defaults to 'bind10'.
+    """
+    if process_name is None:
+        process_name = "bind10"
+    world.processes.wait_for_stderr_str(process_name, ['AUTH_SERVER_STARTED'],
+                                        False)
+
+ at step('have bind10 running(?: with configuration ([\S]+))?' +\
+      '(?: with cmdctl port (\d+))?' +\
+      '(?: as ([\S]+))?')
+def have_bind10_running(step, config_file, cmdctl_port, process_name):
+    """
+    Compound convenience step for running bind10, which consists of
+    start_bind10 and wait_for_auth.
+    Supports the 'with configuration', 'with cmdctl port', and 'as <name>' options.
+    """
+    start_step = 'start bind10 with configuration ' + config_file
+    wait_step = 'wait for bind10 auth to start'
+    if cmdctl_port is not None:
+        start_step += ' with cmdctl port ' + str(cmdctl_port)
+    if process_name is not None:
+        start_step += ' as ' + process_name
+        wait_step = 'wait for bind10 auth of ' + process_name + ' to start'
+    step.given(start_step)
+    step.given(wait_step)
+
+ at step('set bind10 configuration (\S+) to (.*)(?: with cmdctl port (\d+))?')
+def set_config_command(step, name, value, cmdctl_port):
+    """
+    Run bindctl, set the given configuration to the given value, and commit it.
+    Parameters:
+    name ('configuration <name>'): Identifier of the configuration to set
+    value ('to <value>'): value to set it to.
+    cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+                the command to. Defaults to 47805.
+    Fails if cmdctl does not exit with status code 0.
+    """
+    if cmdctl_port is None:
+        cmdctl_port = '47805'
+    args = ['bindctl', '-p', cmdctl_port]
+    bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+                               subprocess.PIPE, None)
+    bindctl.stdin.write("config set " + name + " " + value + "\n")
+    bindctl.stdin.write("config commit\n")
+    bindctl.stdin.write("quit\n")
+    result = bindctl.wait()
+    assert result == 0, "bindctl exit code: " + str(result)
+
+ at step('send bind10 the command (.+)(?: with cmdctl port (\d+))?')
+def send_command(step, command, cmdctl_port):
+    """
+    Run bindctl, send the given command, and exit bindctl.
+    Parameters:
+    command ('the command <command>'): The command to send.
+    cmdctl_port ('with cmdctl port <portnr>', optional): cmdctl port to send
+                the command to. Defaults to 47805.
+    Fails if cmdctl does not exit with status code 0.
+    """
+    if cmdctl_port is None:
+        cmdctl_port = '47805'
+    args = ['bindctl', '-p', cmdctl_port]
+    bindctl = subprocess.Popen(args, 1, None, subprocess.PIPE,
+                               subprocess.PIPE, None)
+    bindctl.stdin.write(command + "\n")
+    bindctl.stdin.write("quit\n")
+    (stdout, stderr) = bindctl.communicate()
+    result = bindctl.returncode
+    assert result == 0, "bindctl exit code: " + str(result) +\
+                        "\nstdout:\n" + str(stdout) +\
+                        "stderr:\n" + str(stderr)
diff --git a/tests/lettuce/features/terrain/querying.py b/tests/lettuce/features/terrain/querying.py
new file mode 100644
index 0000000..ea89b18
--- /dev/null
+++ b/tests/lettuce/features/terrain/querying.py
@@ -0,0 +1,279 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+# This script provides querying functionality
+# The most important step is
+#
+# query for <name> [type X] [class X] [to <addr>[:port]] should have rcode <rc>
+#
+# By default, it will send queries to 127.0.0.1:47806 unless specified
+# otherwise. The rcode is always checked. If the result is not NO_ANSWER,
+# the result will be stored in last_query_result, which can then be inspected
+# more closely, for instance with the step
+#
+# "the last query response should have <property> <value>"
+#
+# Also see example.feature for some examples
+
+from lettuce import *
+import subprocess
+import re
+
+#
+# define a class to easily access different parts
+# We may consider using our full library for this, but for now
+# simply store several parts of the response as text values in
+# this structure.
+# (this actually has the advantage of not relying on our own libraries
+# to test our own, well, libraries)
+#
+# The following attributes are 'parsed' from the response, all as strings,
+# and end up as direct attributes of the QueryResult object:
+# opcode, rcode, id, flags, qdcount, ancount, nscount, adcount
+# (flags is one string with all flags, in the order they appear in the
+# response packet.)
+#
+# this will set 'rcode' as the result code, we 'define' one additional
+# rcode, "NO_ANSWER", if the dig process returned an error code itself
+# In this case none of the other attributes will be set.
+#
+# The different sections will be lists of strings, one for each RR in the
+# section. The question section will start with ';', as per dig output
+#
+# See server_from_sqlite3.feature for various examples to perform queries
+class QueryResult(object):
+    status_re = re.compile("opcode: ([A-Z])+, status: ([A-Z]+), id: ([0-9]+)")
+    flags_re = re.compile("flags: ([a-z ]+); QUERY: ([0-9]+), ANSWER: " +
+                          "([0-9]+), AUTHORITY: ([0-9]+), ADDITIONAL: ([0-9]+)")
+
+    def __init__(self, name, qtype, qclass, address, port):
+        """
+        Constructor. This fires off a query using dig.
+        Parameters:
+        name: The domain name to query
+        qtype: The RR type to query. Defaults to A if it is None.
+        qclass: The RR class to query. Defaults to IN if it is None.
+        address: The IP address to send the query to.
+        port: The port number to send the query to.
+        All parameters must be either strings or have the correct string
+        representation.
+        Only one query attempt will be made.
+        """
+        args = [ 'dig', '+tries=1', '@' + str(address), '-p', str(port) ]
+        if qtype is not None:
+            args.append('-t')
+            args.append(str(qtype))
+        if qclass is not None:
+            args.append('-c')
+            args.append(str(qclass))
+        args.append(name)
+        dig_process = subprocess.Popen(args, 1, None, None, subprocess.PIPE,
+                                       None)
+        result = dig_process.wait()
+        if result != 0:
+            self.rcode = "NO_ANSWER"
+        else:
+            self.rcode = None
+            parsing = "HEADER"
+            self.question_section = []
+            self.answer_section = []
+            self.authority_section = []
+            self.additional_section = []
+            self.line_handler = self.parse_header
+            for out in dig_process.stdout:
+                self.line_handler(out)
+
+    def _check_next_header(self, line):
+        """
+        Returns true if we found a next header, and sets the internal
+        line handler to the appropriate value.
+        """
+        if line == ";; ANSWER SECTION:\n":
+            self.line_handler = self.parse_answer
+        elif line == ";; AUTHORITY SECTION:\n":
+            self.line_handler = self.parse_authority
+        elif line == ";; ADDITIONAL SECTION:\n":
+            self.line_handler = self.parse_additional
+        elif line.startswith(";; Query time"):
+            self.line_handler = self.parse_footer
+        else:
+            return False
+        return True
+
+    def parse_header(self, line):
+        """
+        Parse the header lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        if not self._check_next_header(line):
+            status_match = self.status_re.search(line)
+            flags_match = self.flags_re.search(line)
+            if status_match is not None:
+                self.opcode = status_match.group(1)
+                self.rcode = status_match.group(2)
+            elif flags_match is not None:
+                self.flags = flags_match.group(1)
+                self.qdcount = flags_match.group(2)
+                self.ancount = flags_match.group(3)
+                self.nscount = flags_match.group(4)
+                self.adcount = flags_match.group(5)
+
+    def parse_question(self, line):
+        """
+        Parse the question section lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        if not self._check_next_header(line):
+            if line != "\n":
+                self.question_section.append(line.strip())
+
+    def parse_answer(self, line):
+        """
+        Parse the answer section lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        if not self._check_next_header(line):
+            if line != "\n":
+                self.answer_section.append(line.strip())
+
+    def parse_authority(self, line):
+        """
+        Parse the authority section lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        if not self._check_next_header(line):
+            if line != "\n":
+                self.authority_section.append(line.strip())
+
+    def parse_additional(self, line):
+        """
+        Parse the additional section lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        if not self._check_next_header(line):
+            if line != "\n":
+                self.additional_section.append(line.strip())
+
+    def parse_footer(self, line):
+        """
+        Parse the footer lines of the query response.
+        Parameters:
+        line: The current line of the response.
+        """
+        pass
+
+ at step('A query for ([\w.]+) (?:type ([A-Z]+) )?(?:class ([A-Z]+) )?' +
+      '(?:to ([^:]+)(?::([0-9]+))? )?should have rcode ([\w.]+)')
+def query(step, query_name, qtype, qclass, addr, port, rcode):
+    """
+    Run a query, check the rcode of the response, and store the query
+    result in world.last_query_result.
+    Parameters:
+    query_name ('query for <name>'): The domain name to query.
+    qtype ('type <type>', optional): The RR type to query. Defaults to A.
+    qclass ('class <class>', optional): The RR class to query. Defaults to IN.
+    addr ('to <address>', optional): The IP address of the nameserver to query.
+                           Defaults to 127.0.0.1.
+    port (':<port>', optional): The port number of the nameserver to query.
+                      Defaults to 47806.
+    rcode ('should have rcode <rcode>'): The expected rcode of the answer.
+    """
+    if qtype is None:
+        qtype = "A"
+    if qclass is None:
+        qclass = "IN"
+    if addr is None:
+        addr = "127.0.0.1"
+    if port is None:
+        port = 47806
+    query_result = QueryResult(query_name, qtype, qclass, addr, port)
+    assert query_result.rcode == rcode,\
+        "Expected: " + rcode + ", got " + query_result.rcode
+    world.last_query_result = query_result
+
+ at step('The SOA serial for ([\w.]+) should be ([0-9]+)')
+def query_soa(step, query_name, serial):
+    """
+    Convenience function to check the SOA SERIAL value of the given zone at
+    the nameserver at the default address (127.0.0.1:47806).
+    Parameters:
+    query_name ('for <name>'): The zone to find the SOA record for.
+    serial ('should be <number>'): The expected value of the SOA SERIAL.
+    If the rcode is not NOERROR, or the answer section does not contain the
+    SOA record, this step fails.
+    """
+    query_result = QueryResult(query_name, "SOA", "IN", "127.0.0.1", "47806")
+    assert "NOERROR" == query_result.rcode,\
+        "Got " + query_result.rcode + ", expected NOERROR"
+    assert len(query_result.answer_section) == 1,\
+        "Too few or too many answers in SOA response"
+    soa_parts = query_result.answer_section[0].split()
+    assert serial == soa_parts[6],\
+        "Got SOA serial " + soa_parts[6] + ", expected " + serial
+
+ at step('last query response should have (\S+) (.+)')
+def check_last_query(step, item, value):
+    """
+    Check a specific value in the response from the last successful query sent.
+    Parameters:
+    item: The item to check the value of
+    value: The expected value.
+    This performs a very simple direct string comparison of the QueryResult
+    member with the given item name and the given value.
+    Fails if the item is unknown, or if its value does not match the expected
+    value.
+    """
+    assert world.last_query_result is not None
+    assert item in world.last_query_result.__dict__
+    lq_val = world.last_query_result.__dict__[item]
+    assert str(value) == str(lq_val),\
+           "Got: " + str(lq_val) + ", expected: " + str(value)
+
+ at step('([a-zA-Z]+) section of the last query response should be')
+def check_last_query_section(step, section):
+    """
+    Check the entire contents of the given section of the response of the last
+    query.
+    Parameters:
+    section ('<section> section'): The name of the section (QUESTION, ANSWER,
+                                   AUTHORITY or ADDITIONAL).
+    The expected response is taken from the multiline part of the step in the
+    scenario. Differing whitespace is ignored, but currently the order is
+    significant.
+    Fails if they do not match.
+    """
+    response_string = None
+    if section.lower() == 'question':
+        response_string = "\n".join(world.last_query_result.question_section)
+    elif section.lower() == 'answer':
+        response_string = "\n".join(world.last_query_result.answer_section)
+    elif section.lower() == 'authority':
+        response_string = "\n".join(world.last_query_result.answer_section)
+    elif section.lower() == 'additional':
+        response_string = "\n".join(world.last_query_result.answer_section)
+    else:
+        assert False, "Unknown section " + section
+    # replace whitespace of any length by one space
+    response_string = re.sub("[ \t]+", " ", response_string)
+    expect = re.sub("[ \t]+", " ", step.multiline)
+    assert response_string.strip() == expect.strip(),\
+        "Got:\n'" + response_string + "'\nExpected:\n'" + step.multiline +"'"
+    
+    
diff --git a/tests/lettuce/features/terrain/steps.py b/tests/lettuce/features/terrain/steps.py
new file mode 100644
index 0000000..4b199d6
--- /dev/null
+++ b/tests/lettuce/features/terrain/steps.py
@@ -0,0 +1,85 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This file contains a number of common steps that are general and may be used
+# by a lot of feature files.
+#
+
+from lettuce import *
+import os
+
+ at step('stop process (\w+)')
+def stop_a_named_process(step, process_name):
+    """
+    Stop the process with the given name.
+    Parameters:
+    process_name ('process <name>'): Name of the process to stop.
+    """
+    world.processes.stop_process(process_name)
+
+ at step('wait for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
+def wait_for_message(step, new, process_name, message, not_message):
+    """
+    Block until the given message is printed to the given process's stderr
+    output.
+    Parameter:
+    new: (' new', optional): Only check the output printed since last time
+                             this step was used for this process.
+    process_name ('<name> stderr'): Name of the process to check the output of.
+    message ('message <message>'): Output (part) to wait for.
+    not_message ('not <message>'): Output (part) to wait for, and fail
+    Fails if the message is not found after 10 seconds.
+    """
+    strings = [message]
+    if not_message is not None:
+        strings.append(not_message)
+    (found, line) = world.processes.wait_for_stderr_str(process_name, strings, new)
+    if not_message is not None:
+        assert found != not_message, line
+
+ at step('wait for (new )?(\w+) stdout message (\w+)(?: not (\w+))?')
+def wait_for_message(step, process_name, message, not_message):
+    """
+    Block until the given message is printed to the given process's stdout
+    output.
+    Parameter:
+    new: (' new', optional): Only check the output printed since last time
+                             this step was used for this process.
+    process_name ('<name> stdout'): Name of the process to check the output of.
+    message ('message <message>'): Output (part) to wait for, and succeed.
+    not_message ('not <message>'): Output (part) to wait for, and fail
+    Fails if the message is not found after 10 seconds.
+    """
+    strings = [message]
+    if not_message is not None:
+        strings.append(not_message)
+    (found, line) = world.processes.wait_for_stdout_str(process_name, strings, new)
+    if not_message is not None:
+        assert found != not_message, line
+
+ at step('the file (\S+) should (not )?exist')
+def check_existence(step, file_name, should_not_exist):
+    """
+    Check the existence of the given file.
+    Parameters:
+    file_name ('file <name>'): File to check existence of.
+    should_not_exist ('not', optional): Whether it should or should not exist.
+    Fails if the file should exist and does not, or vice versa.
+    """
+    if should_not_exist is None:
+        assert os.path.exists(file_name), file_name + " does not exist"
+    else:
+        assert not os.path.exists(file_name), file_name + " exists"
diff --git a/tests/lettuce/features/terrain/terrain.py b/tests/lettuce/features/terrain/terrain.py
new file mode 100644
index 0000000..d2ac03f
--- /dev/null
+++ b/tests/lettuce/features/terrain/terrain.py
@@ -0,0 +1,363 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# This is the 'terrain' in which the lettuce lives. By convention, this is
+# where global setup and teardown is defined.
+#
+# We declare some attributes of the global 'world' variables here, so the
+# tests can safely assume they are present.
+#
+# We also use it to provide scenario invariants, such as resetting data.
+#
+
+from lettuce import *
+import subprocess
+import os.path
+import shutil
+import re
+import time
+
+# In order to make sure we start all tests with a 'clean' environment,
+# We perform a number of initialization steps, like restoring configuration
+# files, and removing generated data files.
+
+# This approach may not scale; if so we should probably provide specific
+# initialization steps for scenarios. But until that is shown to be a problem,
+# It will keep the scenarios cleaner.
+
+# This is a list of files that are freshly copied before each scenario
+# The first element is the original, the second is the target that will be
+# used by the tests that need them
+copylist = [
+["configurations/example.org.config.orig", "configurations/example.org.config"]
+]
+
+# This is a list of files that, if present, will be removed before a scenario
+removelist = [
+"data/test_nonexistent_db.sqlite3"
+]
+
+# When waiting for output data of a running process, use OUTPUT_WAIT_INTERVAL
+# as the interval in which to check again if it has not been found yet.
+# If we have waited OUTPUT_WAIT_MAX_INTERVALS times, we will abort with an
+# error (so as not to hang indefinitely)
+OUTPUT_WAIT_INTERVAL = 0.5
+OUTPUT_WAIT_MAX_INTERVALS = 20
+
+# class that keeps track of one running process and the files
+# we created for it.
+class RunningProcess:
+    def __init__(self, step, process_name, args):
+        # set it to none first so destructor won't error if initializer did
+        """
+        Initialize the long-running process structure, and start the process.
+        Parameters:
+        step: The scenario step it was called from. This is used for
+              determining the output files for redirection of stdout
+              and stderr.
+        process_name: The name to refer to this running process later.
+        args: Array of arguments to pass to Popen().
+        """
+        self.process = None
+        self.step = step
+        self.process_name = process_name
+        self.remove_files_on_exit = True
+        self._check_output_dir()
+        self._create_filenames()
+        self._start_process(args)
+
+    def _start_process(self, args):
+        """
+        Start the process.
+        Parameters:
+        args:
+        Array of arguments to pass to Popen().
+        """
+        stderr_write = open(self.stderr_filename, "w")
+        stdout_write = open(self.stdout_filename, "w")
+        self.process = subprocess.Popen(args, 1, None, subprocess.PIPE,
+                                        stdout_write, stderr_write)
+        # open them again, this time for reading
+        self.stderr = open(self.stderr_filename, "r")
+        self.stdout = open(self.stdout_filename, "r")
+
+    def mangle_filename(self, filebase, extension):
+        """
+        Remove whitespace and non-default characters from a base string,
+        and return the substituted value. Whitespace is replaced by an
+        underscore. Any other character that is not an ASCII letter, a
+        number, a dot, or a hyphen or underscore is removed.
+        Parameter:
+        filebase: The string to perform the substitution and removal on
+        extension: An extension to append to the result value
+        Returns the modified filebase with the given extension
+        """
+        filebase = re.sub("\s+", "_", filebase)
+        filebase = re.sub("[^a-zA-Z0-9.\-_]", "", filebase)
+        return filebase + "." + extension
+
+    def _check_output_dir(self):
+        # We may want to make this overridable by the user, perhaps
+        # through an environment variable. Since we currently expect
+        # lettuce to be run from our lettuce dir, we shall just use
+        # the relative path 'output/'
+        """
+        Make sure the output directory for stdout/stderr redirection
+        exists.
+        Fails if it exists but is not a directory, or if it does not
+        and we are unable to create it.
+        """
+        self._output_dir = os.getcwd() + os.sep + "output"
+        if not os.path.exists(self._output_dir):
+            os.mkdir(self._output_dir)
+        assert os.path.isdir(self._output_dir),\
+            self._output_dir + " is not a directory."
+
+    def _create_filenames(self):
+        """
+        Derive the filenames for stdout/stderr redirection from the
+        feature, scenario, and process name. The base will be
+        "<Feature>-<Scenario>-<process name>.[stdout|stderr]"
+        """
+        filebase = self.step.scenario.feature.name + "-" +\
+                   self.step.scenario.name + "-" + self.process_name
+        self.stderr_filename = self._output_dir + os.sep +\
+                               self.mangle_filename(filebase, "stderr")
+        self.stdout_filename = self._output_dir + os.sep +\
+                               self.mangle_filename(filebase, "stdout")
+
+    def stop_process(self):
+        """
+        Stop this process by calling terminate(). Blocks until process has
+        exited. If remove_files_on_exit is True, redirected output files
+        are removed.
+        """
+        if self.process is not None:
+            self.process.terminate()
+            self.process.wait()
+        self.process = None
+        if self.remove_files_on_exit:
+            self._remove_files()
+
+    def _remove_files(self):
+        """
+        Remove the files created for redirection of stdout/stderr output.
+        """
+        os.remove(self.stderr_filename)
+        os.remove(self.stdout_filename)
+
+    def _wait_for_output_str(self, filename, running_file, strings, only_new):
+        """
+        Wait for a line of output in this process. This will (if only_new is
+        False) first check all previous output from the process, and if not
+        found, check all output since the last time this method was called.
+        For each line in the output, the given strings array is checked. If
+        any output lines checked contains one of the strings in the strings
+        array, that string (not the line!) is returned.
+        Parameters:
+        filename: The filename to read previous output from, if applicable.
+        running_file: The open file to read new output from.
+        strings: Array of strings to look for.
+        only_new: If true, only check output since last time this method was
+                  called. If false, first check earlier output.
+        Returns a tuple containing the matched string, and the complete line
+        it was found in.
+        Fails if none of the strings was read after 10 seconds
+        (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+        """
+        if not only_new:
+            full_file = open(filename, "r")
+            for line in full_file:
+                for string in strings:
+                    if line.find(string) != -1:
+                        full_file.close()
+                        return (string, line)
+        wait_count = 0
+        while wait_count < OUTPUT_WAIT_MAX_INTERVALS:
+            where = running_file.tell()
+            line = running_file.readline()
+            if line:
+                for string in strings:
+                    if line.find(string) != -1:
+                        return (string, line)
+            else:
+                wait_count += 1
+                time.sleep(OUTPUT_WAIT_INTERVAL)
+                running_file.seek(where)
+        assert False, "Timeout waiting for process output: " + str(strings)
+
+    def wait_for_stderr_str(self, strings, only_new = True):
+        """
+        Wait for one of the given strings in this process's stderr output.
+        Parameters:
+        strings: Array of strings to look for.
+        only_new: If true, only check output since last time this method was
+                  called. If false, first check earlier output.
+        Returns a tuple containing the matched string, and the complete line
+        it was found in.
+        Fails if none of the strings was read after 10 seconds
+        (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+        """
+        return self._wait_for_output_str(self.stderr_filename, self.stderr,
+                                         strings, only_new)
+
+    def wait_for_stdout_str(self, strings, only_new = True):
+        """
+        Wait for one of the given strings in this process's stdout output.
+        Parameters:
+        strings: Array of strings to look for.
+        only_new: If true, only check output since last time this method was
+                  called. If false, first check earlier output.
+        Returns a tuple containing the matched string, and the complete line
+        it was found in.
+        Fails if none of the strings was read after 10 seconds
+        (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+        """
+        return self._wait_for_output_str(self.stdout_filename, self.stdout,
+                                         strings, only_new)
+
+# Container class for a number of running processes
+# i.e. servers like bind10, etc
+# one-shot programs like dig or bindctl are started and closed separately
+class RunningProcesses:
+    def __init__(self):
+        """
+        Initialize with no running processes.
+        """
+        self.processes = {}
+    
+    def add_process(self, step, process_name, args):
+        """
+        Start a process with the given arguments, and store it under the given
+        name.
+        Parameters:
+        step: The scenario step it was called from. This is used for
+              determining the output files for redirection of stdout
+              and stderr.
+        process_name: The name to refer to this running process later.
+        args: Array of arguments to pass to Popen().
+        Fails if a process with the given name is already running.
+        """
+        assert process_name not in self.processes,\
+            "Process " + process_name + " already running"
+        self.processes[process_name] = RunningProcess(step, process_name, args)
+
+    def get_process(self, process_name):
+        """
+        Return the Process with the given process name.
+        Parameters:
+        process_name: The name of the process to return.
+        Fails if the process is not running.
+        """
+        assert process_name in self.processes,\
+            "Process " + name + " unknown"
+        return self.processes[process_name]
+
+    def stop_process(self, process_name):
+        """
+        Stop the Process with the given process name.
+        Parameters:
+        process_name: The name of the process to return.
+        Fails if the process is not running.
+        """
+        assert process_name in self.processes,\
+            "Process " + name + " unknown"
+        self.processes[process_name].stop_process()
+        del self.processes[process_name]
+        
+    def stop_all_processes(self):
+        """
+        Stop all running processes.
+        """
+        for process in self.processes.values():
+            process.stop_process()
+    
+    def keep_files(self):
+        """
+        Keep the redirection files for stdout/stderr output of all processes
+        instead of removing them when they are stopped later.
+        """
+        for process in self.processes.values():
+            process.remove_files_on_exit = False
+
+    def wait_for_stderr_str(self, process_name, strings, only_new = True):
+        """
+        Wait for one of the given strings in the given process's stderr output.
+        Parameters:
+        process_name: The name of the process to check the stderr output of.
+        strings: Array of strings to look for.
+        only_new: If true, only check output since last time this method was
+                  called. If false, first check earlier output.
+        Returns the matched string.
+        Fails if none of the strings was read after 10 seconds
+        (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+        Fails if the process is unknown.
+        """
+        assert process_name in self.processes,\
+           "Process " + process_name + " unknown"
+        return self.processes[process_name].wait_for_stderr_str(strings,
+                                                                only_new)
+
+    def wait_for_stdout_str(self, process_name, strings, only_new = True):
+        """
+        Wait for one of the given strings in the given process's stdout output.
+        Parameters:
+        process_name: The name of the process to check the stdout output of.
+        strings: Array of strings to look for.
+        only_new: If true, only check output since last time this method was
+                  called. If false, first check earlier output.
+        Returns the matched string.
+        Fails if none of the strings was read after 10 seconds
+        (OUTPUT_WAIT_INTERVAL * OUTPUT_WAIT_MAX_INTERVALS).
+        Fails if the process is unknown.
+        """
+        assert process_name in self.processes,\
+           "Process " + process_name + " unknown"
+        return self.processes[process_name].wait_for_stdout_str(strings,
+                                                                only_new)
+
+@before.each_scenario
+def initialize(scenario):
+    """
+    Global initialization for each scenario.
+    Resets the world's process tracking and last-query state, restores
+    pristine copies of the files in copylist, and deletes the generated
+    files in removelist, so every scenario starts from a clean environment.
+    Parameters:
+    scenario: The lettuce scenario about to run (not used here).
+    """
+    # Keep track of running processes
+    world.processes = RunningProcesses()
+
+    # Convenience variable to access the last query result from querying.py
+    world.last_query_result = None
+
+    # Some tests can modify the settings. If the tests fail half-way, or
+    # don't clean up, this can leave configurations or data in a bad state,
+    # so we copy them from originals before each scenario
+    for item in copylist:
+        shutil.copy(item[0], item[1])
+
+    # Remove any generated data files left over from earlier runs.
+    for item in removelist:
+        if os.path.exists(item):
+            os.remove(item)
+
+@after.each_scenario
+def cleanup(scenario):
+    """
+    Global cleanup for each scenario.
+    Stops every process started during the scenario. If the scenario
+    failed, the stdout/stderr redirection files are kept for inspection
+    instead of being removed on process shutdown.
+    Parameters:
+    scenario: The lettuce scenario that just finished; scenario.passed
+              indicates whether it succeeded.
+    """
+    # Keep output files if the scenario failed
+    if not scenario.passed:
+        world.processes.keep_files()
+    # Stop any running processes we may have had around
+    world.processes.stop_all_processes()
+    
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
new file mode 100644
index 0000000..70c3571
--- /dev/null
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -0,0 +1,11 @@
+Feature: Xfrin 
+    Tests for Xfrin, specific for BIND 10 behaviour.
+    
+    Scenario: Retransfer command
+    Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master
+    And I have bind10 running with configuration xfrin/retransfer_slave.conf
+    A query for www.example.org should have rcode REFUSED
+    Wait for bind10 stderr message CMDCTL_STARTED
+    When I send bind10 the command Xfrin retransfer example.org IN 127.0.0.1 47807
+    Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
+    A query for www.example.org should have rcode NOERROR
diff --git a/tests/lettuce/setup_intree_bind10.sh.in b/tests/lettuce/setup_intree_bind10.sh.in
new file mode 100755
index 0000000..40fd82d
--- /dev/null
+++ b/tests/lettuce/setup_intree_bind10.sh.in
@@ -0,0 +1,46 @@
+#! /bin/sh
+
+# Copyright (C) 2010  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+PYTHON_EXEC=${PYTHON_EXEC:-@PYTHON@}
+export PYTHON_EXEC
+
+BIND10_PATH=@abs_top_builddir@/src/bin/bind10
+
+PATH=@abs_top_builddir@/src/bin/bind10:@abs_top_builddir@/src/bin/bindctl:@abs_top_builddir@/src/bin/msgq:@abs_top_builddir@/src/bin/auth:@abs_top_builddir@/src/bin/resolver:@abs_top_builddir@/src/bin/cfgmgr:@abs_top_builddir@/src/bin/cmdctl:@abs_top_builddir@/src/bin/stats:@abs_top_builddir@/src/bin/xfrin:@abs_top_builddir@/src/bin/xfrout:@abs_top_builddir@/src/bin/zonemgr:@abs_top_builddir@/src/bin/dhcp6:@abs_top_builddir@/src/bin/sockcreator:$PATH
+export PATH
+
+PYTHONPATH=@abs_top_builddir@/src/bin:@abs_top_builddir@/src/lib/python/isc/log_messages:@abs_top_builddir@/src/lib/python:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/xfr/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/python/isc/config:@abs_top_builddir@/src/lib/python/isc/acl/.libs:@abs_top_builddir@/src/lib/python/isc/datasrc/.libs
+export PYTHONPATH
+
+# If necessary (rare cases), explicitly specify paths to dynamic libraries
+# required by loadable python modules.
+SET_ENV_LIBRARY_PATH=@SET_ENV_LIBRARY_PATH@
+if test $SET_ENV_LIBRARY_PATH = yes; then
+	@ENV_LIBRARY_PATH@=@abs_top_builddir@/src/lib/dns/.libs:@abs_top_builddir@/src/lib/dns/python/.libs:@abs_top_builddir@/src/lib/cryptolink/.libs:@abs_top_builddir@/src/lib/cc/.libs:@abs_top_builddir@/src/lib/config/.libs:@abs_top_builddir@/src/lib/log/.libs:@abs_top_builddir@/src/lib/acl/.libs:@abs_top_builddir@/src/lib/util/.libs:@abs_top_builddir@/src/lib/util/io/.libs:@abs_top_builddir@/src/lib/exceptions/.libs:@abs_top_builddir@/src/lib/datasrc/.libs:$@ENV_LIBRARY_PATH@
+	export @ENV_LIBRARY_PATH@
+fi
+
+B10_FROM_SOURCE=@abs_top_srcdir@
+export B10_FROM_SOURCE
+# TODO: We need to do this feature based (ie. no general from_source)
+# But right now we need a second one because some spec files are
+# generated and hence end up under builddir
+B10_FROM_BUILD=@abs_top_builddir@
+export B10_FROM_BUILD
+
+BIND10_MSGQ_SOCKET_FILE=@abs_top_builddir@/msgq_socket
+export BIND10_MSGQ_SOCKET_FILE
diff --git a/tests/system/README b/tests/system/README
index a43d49e..a1c0a97 100644
--- a/tests/system/README
+++ b/tests/system/README
@@ -5,48 +5,49 @@ See COPYRIGHT in the source root or http://isc.org/copyright.html for terms.
 This is a simple test environment for running BIND 10 system tests
 involving multiple name servers.  It was originally developed for BIND
 9, and has been ported to test BIND 10 implementations.  Ideally we
-should share the same framework for both versions, so some part of
-the original setup are kept, even though they are BIND 9 specific and
-not currently used.
+should share the same framework for both versions, so some part of the
+original setup are kept, even though they are BIND 9 specific and not
+currently used.
 
-Also, these tests generally rely on BIND 9 programs, most commonly its
-dig, and will sometimes be its name server (named).  So, the test
+Also, these tests generally rely on BIND 9 programs, most commonly
+its dig, and will sometimes be its name server (named).  So, the test
 environment assumes that there's a source tree of BIND 9 where its
-programs are built, and that an environment variable "BIND9_TOP" is
-set to point to the top directory of the source tree.
+programs are built, and that an environment variable "BIND9_TOP" is set
+to point to the top directory of the source tree.
 
 There are multiple test suites, each in a separate subdirectory and
 involving a different DNS setup.  They are:
 
   bindctl/      Some basic management operations using the bindctl tool
-  glue/		Glue handling tests
+  glue/		    Glue handling tests
+  ixfr/         Incremental transfer tests
+
 (the following tests are planned to be added soon)
-  dnssec/	DNSSEC tests
+  dnssec/	    DNSSEC tests
   masterfile/	Master file parser
-  xfer/		Zone transfer tests
+  axfr/         Full-transfer tests
 
 Typically each test suite sets up 2-5 instances of BIND 10 (or BIND 9
-named) and then performs one or more tests against them.  Within the
-test suite subdirectory, each instance has a separate subdirectory
-containing its configuration data.  By convention, these
-subdirectories are named "nsx1", "nsx2", etc for BIND 10 ("x" means
-BIND 10), and "ns1", "ns2", etc. for BIND 9.
+named) and then performs one or more tests against them.  Within the test
+suite subdirectory, each instance has a separate subdirectory containing
+its configuration data.  By convention, these subdirectories are named
+"nsx1", "nsx2", etc for BIND 10 ("x" means BIND 10), and "ns1", "ns2",
+etc. for BIND 9.
 
 The tests are completely self-contained and do not require access to
-the real DNS.  Generally, one of the test servers (ns[x]1) is set up
-as a root name server and is listed in the hints file of the others.
+the real DNS.  Generally, one of the test servers (ns[x]1) is set up as
+a root name server and is listed in the hints file of the others.
 
-To enable all servers to run on the same machine, they bind to
-separate virtual IP address on the loopback interface.  ns[x]1 runs on
-10.53.0.1, ns[x]2 on 10.53.0.2, etc.  Before running any tests, you
-must set up these addresses by running "ifconfig.sh up" as root.
+To enable all servers to run on the same machine, they bind to separate
+virtual IP address on the loopback interface.  ns[x]1 runs on 10.53.0.1,
+ns[x]2 on 10.53.0.2, etc.  Before running any tests, you must set up
+these addresses by running "ifconfig.sh up" as root.
 
 Mac OS X:
-If you wish to make the interfaces survive across reboots
-copy org.isc.bind.system and org.isc.bind.system to
-/Library/LaunchDaemons then run
-"launchctl load /Library/LaunchDaemons/org.isc.bind.system.plist" as
-root.
+If you wish to make the interfaces survive across reboots copy
+org.isc.bind.system and org.isc.bind.system to /Library/LaunchDaemons
+then run "launchctl load /Library/LaunchDaemons/org.isc.bind.system.plist"
+as root.
 
 The servers use port 53210 instead of the usual port 53, so they can be
 run without root privileges once the interfaces have been set up.
diff --git a/tests/system/bindctl/tests.sh b/tests/system/bindctl/tests.sh
index 6923c41..565b306 100755
--- a/tests/system/bindctl/tests.sh
+++ b/tests/system/bindctl/tests.sh
@@ -24,6 +24,10 @@ SYSTEMTESTTOP=..
 status=0
 n=0
 
+# TODO: consider consistency with statistics definition in auth.spec
+auth_queries_tcp="\<queries\.tcp\>"
+auth_queries_udp="\<queries\.udp\>"
+
 echo "I:Checking b10-auth is working by default ($n)"
 $DIG +norec @10.53.0.1 -p 53210 ns.example.com. A >dig.out.$n || status=1
 # perform a simple check on the output (digcomp would be too much for this)
@@ -40,13 +44,13 @@ echo 'Stats show
 	--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
 # the server should have received 1 UDP and 1 TCP queries (TCP query was
 # sent from the server startup script)
-grep "\"auth.queries.tcp\": 1," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<1\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi
 n=`expr $n + 1`
 
 echo "I:Stopping b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth false
+echo 'config remove Boss/components b10-auth
 config commit
 quit
 ' | $RUN_BINDCTL \
@@ -57,7 +61,8 @@ if [ $status != 0 ]; then echo "I:failed"; fi
 n=`expr $n + 1`
 
 echo "I:Restarting b10-auth and checking that ($n)"
-echo 'config set Boss/start_auth true
+echo 'config add Boss/components b10-auth
+config set Boss/components/b10-auth { "special": "auth", "kind": "needed" }
 config commit
 quit
 ' | $RUN_BINDCTL \
@@ -73,8 +78,8 @@ echo 'Stats show
 ' | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
 # The statistics counters should have been reset while stop/start.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 1," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<1\>" bindctl.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi
 n=`expr $n + 1`
 
@@ -97,8 +102,8 @@ echo 'Stats show
 ' | $RUN_BINDCTL \
 	--csv-file-dir=$BINDCTL_CSV_DIR > bindctl.out.$n || status=1
 # The statistics counters shouldn't be reset due to hot-swapping datasource.
-grep "\"auth.queries.tcp\": 0," bindctl.out.$n > /dev/null || status=1
-grep "\"auth.queries.udp\": 2," bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_tcp".*\<0\>" bindctl.out.$n > /dev/null || status=1
+grep $auth_queries_udp".*\<2\>" bindctl.out.$n > /dev/null || status=1
 if [ $status != 0 ]; then echo "I:failed"; fi
 n=`expr $n + 1`
 
diff --git a/tests/system/cleanall.sh b/tests/system/cleanall.sh
index 17c3d4a..434c6b1 100755
--- a/tests/system/cleanall.sh
+++ b/tests/system/cleanall.sh
@@ -27,7 +27,10 @@ find . -type f \( \
 
 status=0
 
-for d in `find . -type d -maxdepth 1 -mindepth 1 -print`
+for d in ./.* ./* ./*/*
 do
+   case $d in ./.|./..) continue ;; esac
+   test -d $d || continue
+
    test ! -f $d/clean.sh || ( cd $d && sh clean.sh )
 done
diff --git a/tests/system/common/rndc.conf b/tests/system/common/rndc.conf
new file mode 100644
index 0000000..a897548
--- /dev/null
+++ b/tests/system/common/rndc.conf
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007  Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001  Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+options {
+        default-key     "rndc_key";
+};
+
+key rndc_key {
+        algorithm       hmac-md5;
+        secret          "1234abcd8765";
+};
diff --git a/tests/system/common/rndc.key b/tests/system/common/rndc.key
new file mode 100644
index 0000000..c2c3457
--- /dev/null
+++ b/tests/system/common/rndc.key
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id: rndc.key,v 1.3 2011-03-12 04:59:47 tbox Exp $ */
+
+key rndc_key {
+        secret "1234abcd8765";
+        algorithm hmac-md5;
+};
diff --git a/tests/system/conf.sh.in b/tests/system/conf.sh.in
index 66aa3f5..92f72fa 100755
--- a/tests/system/conf.sh.in
+++ b/tests/system/conf.sh.in
@@ -35,23 +35,36 @@ if [ -z $BIND9_TOP ]; then
 fi
 
 # Find the top of the source and test trees.
-TOP=@abs_top_srcdir@
-TEST_TOP=@abs_builddir@
-
-RUN_BIND10=$TOP/src/bin/bind10/run_bind10.sh
-RUN_BINDCTL=$TOP/src/bin/bindctl/run_bindctl.sh
-BINDCTL_CSV_DIR=@abs_srcdir@/common/
-B10_LOADZONE=$TOP/src/bin/loadzone/run_loadzone.sh
-BIND9_NAMED=$BIND9_TOP/bin/named/named
-DIG=$BIND9_TOP/bin/dig/dig
+export TOP=@abs_top_srcdir@
+export TEST_TOP=@abs_builddir@
+
+# Programs
+export RUN_BIND10=$TOP/src/bin/bind10/run_bind10.sh
+export RUN_BINDCTL=$TOP/src/bin/bindctl/run_bindctl.sh
+export BINDCTL_CSV_DIR=@abs_srcdir@/common/
+export B10_LOADZONE=$TOP/src/bin/loadzone/run_loadzone.sh
+export BIND9_NAMED=$BIND9_TOP/bin/named/named
+export DIG=$BIND9_TOP/bin/dig/dig
+export RNDC=$BIND9_TOP/bin/rndc/rndc
+
 # Test tools borrowed from BIND 9's system test (without change).
-TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
-DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
+export TESTSOCK=$BIND9_TOP/bin/tests/system/testsock.pl
+export DIGCOMP=$BIND9_TOP/bin/tests/system/digcomp.pl
+
+export SUBDIRS="bindctl glue ixfr/in-2"
+# Add appropriate subdirectories to the above statement as the tests become
+# available.
+#SUBDIRS="dnssec masterfile ixfr/in-1 ixfr/in-2 ixfr/in-4"
 
-SUBDIRS="bindctl glue"
-#SUBDIRS="dnssec masterfile xfer"
+# PERL will be an empty string if no perl interpreter was found.  A similar
+# comment applies to AWK.
+export PERL=@PERL@
+export AWK=@AWK@
 
-# PERL will be an empty string if no perl interpreter was found.
-PERL=@PERL@
+# Other constants
+export RNDC_PORT=9953
+export DNS_PORT=53210
 
-export RUN_BIND10 BIND9_NAMED DIG SUBDIRS PERL TESTSOCK
+export TESTS_TOP=$TOP/tests
+export SYSTEM_TOP=$TESTS_TOP/system
+export IXFR_TOP=$SYSTEM_TOP/ixfr
diff --git a/tests/system/ixfr/README b/tests/system/ixfr/README
new file mode 100644
index 0000000..51cba8a
--- /dev/null
+++ b/tests/system/ixfr/README
@@ -0,0 +1,86 @@
+Introduction
+============
+The directories in-1 to in-4 implement the following tests of the IXFR-in
+capability of BIND 10.
+
+in-1: Check that BIND 10 can receive IXFR in a single UDP packet.
+in-2: Check that BIND 10 can receive IXFR via TCP.
+in-3: Check that BIND 10 will request AXFR if the server does not support IXFR.
+in-4: Check that BIND 10 will request IXFR when its SOA refresh times out
+
+The tests are described more fully in the document:
+
+http://bind10.isc.org/wiki/IxfrSystemTests
+
+Overview
+========
+All the tests use two nameservers:
+
+* A BIND 9 nameserver acting as the IXFR server (using the nomenclature
+of RFC 1995).
+* A BIND 10 nameserver acting as the IXFR client.
+
+In general, the tests attempt to set up the server and client independently.
+Communication is established between the systems by updating their
+configurations and a notification sent to the client.  This should cause the
+client to request an IXFR from the server. (The exception is test 4, where the
+request is a result of the expiration of the SOA refresh time.)
+
+A check of zone files - or in these tests, of SOA serial number - can only
+reveal that a transfer has taken place.  To check what has happened,
+e.g. whether the transfer was via UDP or whether a TCP request took place,
+the BIND 10 log file is searched for known message IDs.
+
+The searching of the log files for message IDs is one of the reasons that,
+unlike other system tests, the IXFR set of tests is broken up into separate
+tests that require the stopping and starting of nameservers (and tidying up of
+log files) between each test.  Doing this means that only the existence of a
+particular message ID needs to be checked - there is no risk that another test
+produced it.  The other reason is that each IXFR test requires the
+nameservers to be in a specific state at the start of the test; this is easier
+to assure if they are not updating one another as the result of configuration
+settings established in the previous test.
+
+Test Files
+==========
+
+Data Files
+----------
+(All within tests/system/ixfr.  Some .in files are processed to substitute
+for build variables in the build process to give the files listed here.)
+
+db.example.nX. These files hold the RRs for a zone which should not
+fit within a single UDP packet.  The files are different versions of the zone
+- the N-0 version (i.e. the latest version - "N" - the "-0" is present so
+that the files have a consistent name), N-2 etc. (See the full description
+of the tests for the meaning of N-2 etc.)
+
+db.example.common: A set of RRs to bulk out the zone to be larger than can
+be contained in a single UDP packet.
+
+db.example.n2.refresh: The N-2 version of the zone, but with a small SOA
+refresh time (for test 4).
+
+named_xxxx.conf: Various BIND 9 configuration files with NOTIFYs and/or
+IXFR enabled or disabled.
+
+Directories
+-----------
+The tests/system/ixfr directory holds the IXFR tests.  Within that
+directory are subdirectories in-1 through in-4 for each test.  And within
+each test directory are the directories ns1 (for the BIND 9 nameserver)
+and nsx2 (for the BIND 10 nameserver).
+
+Shell Scripts
+-------------
+The IXFR tests use the same framework as the rest of the system tests,
+being based around shell scripts.  Many have a ".in" form as they require
+substitution of build variables before they can be used, and so are
+listed in configure.ac.  The files specific to the IXFR tests are:
+
+tests/system/ixfr/ixfr_init.sh.in: defines environment variables and shell
+subroutines used in the tests.  (This references system/conf.sh.in which
+defines most of them.)
+
+tests/system/ixfr/common_tests.sh.in: tests in-1 and in-2 are virtually
+identical - this holds the common code.
diff --git a/tests/system/ixfr/b10-config.db.in b/tests/system/ixfr/b10-config.db.in
new file mode 100644
index 0000000..946d80f
--- /dev/null
+++ b/tests/system/ixfr/b10-config.db.in
@@ -0,0 +1,23 @@
+{"version": 2,
+    "Xfrin": {
+        "zones": [{
+            "master_addr": "10.53.0.1",
+            "master_port": 53210,
+            "name": "example.",
+            "use_ixfr": true
+        }]
+    },
+    "Auth": {
+        "listen_on": [{
+            "address": "10.53.0.2",
+            "port": 53210
+        }],
+        "database_file": "@abs_builddir@/zone.sqlite3"
+    },
+    "Zonemgr": {
+        "secondary_zones": [{
+            "name": "example.",
+            "class": "IN"
+        }]
+    }
+}
diff --git a/tests/system/ixfr/clean_ns.sh b/tests/system/ixfr/clean_ns.sh
new file mode 100644
index 0000000..88f4ff1
--- /dev/null
+++ b/tests/system/ixfr/clean_ns.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Clean up nameserver directories after zone transfer tests.
+
+rm -f ns1/named.conf
+rm -f ns1/db.example*
+rm -f ns1/named.memstats
+
+rm -f nsx2/bind10.run
+rm -f nsx2/b10-config.db
+rm -f ../zone.sqlite3
+
+rm -f client.dig
+rm -f server.dig
diff --git a/tests/system/ixfr/common_tests.sh.in b/tests/system/ixfr/common_tests.sh.in
new file mode 100644
index 0000000..90d0284
--- /dev/null
+++ b/tests/system/ixfr/common_tests.sh.in
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script is used in a couple of IXFR tests.
+#
+# Preconditions:\n
+# The BIND 9 nameserver (ns1, 10.53.0.1, acting as the IXFR server) is loaded
+# with the N-4 version of the zone.  (It may hold prior versions as well.)
+# Notifications are disabled.
+#
+# The BIND 10 nameserver (nsx2, 10.53.0.2, acting as the IXFR client) is loaded
+# with an earlier (unspecified) version of the zone.
+#
+# Actions:\n
+# This script updates the IXFR server with the N-2 and N-0 versions of the zone.
+# It then updates the BIND 10 configuration so that it looks for IXFRs from
+# the IXFR server and causes the server to send the client a NOTIFY.  After
+waiting for the client to update from the server, it compares the zones of
+the two systems, reporting an error if they are different.
+#
+# Caller Actions:\n
+# The caller can pre-load the BIND 10 IXFR client with whatever version of the
+# zone it requires.  It can also load the BIND 9 IXFR server with zones earlier
+# than N-4.
+#
+# After this test has finished, it is up to the caller to check the logs
+# to see if they report the expected behavior.
+#
+# \return 0 if the script executed successfully, non-zero otherwise
+
+# Set up variables etc.
+. @abs_top_builddir@/tests/system/conf.sh
+. $IXFR_TOP/ixfr_init.sh
+
+set -e
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:$CLIENT_NAME SOA serial of IXFR client is $old_client_serial"
+
+# Load the BIND 9 system (the IXFR server) with the "n - 2" and "n" version of
+# the zones.  With ixfr-from-differences set to "yes", the nameserver should
+# generate the differences between them.
+echo "I:$SERVER_NAME updating IXFR-server for ixfr-in tests"
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n2
+
+# Wait a bit - it seems that if two updates are loaded in quick succession,
+# the second sometimes gets lost.
+sleep 5
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n0
+
+echo "I:$CLIENT_NAME forcing IXFR client to retrieve new version of the zone"
+$RUN_BINDCTL << .
+Xfrin retransfer zone_name="example"
+.
+
+# Wait for the client to update itself.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+
+# Has updated, compare the client and server's versions of the zones - they
+# should be the same.
+compare_zones $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+
+set +e
diff --git a/tests/system/ixfr/db.example.common b/tests/system/ixfr/db.example.common
new file mode 100644
index 0000000..90435ce
--- /dev/null
+++ b/tests/system/ixfr/db.example.common
@@ -0,0 +1,1556 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+; This files holds a number of AAAA records to bulk out a zone file beyond
+; 16kB.  It is used in tests where it is required that the contents of a zone
+; do not fit into a single UDP packet.
+
+aaaa-000		IN	AAAA	2001:db8::0000
+aaaa-001		IN	AAAA	2001:db8::0001
+aaaa-002		IN	AAAA	2001:db8::0002
+aaaa-003		IN	AAAA	2001:db8::0003
+aaaa-004		IN	AAAA	2001:db8::0004
+aaaa-005		IN	AAAA	2001:db8::0005
+aaaa-006		IN	AAAA	2001:db8::0006
+aaaa-007		IN	AAAA	2001:db8::0007
+aaaa-008		IN	AAAA	2001:db8::0008
+aaaa-009		IN	AAAA	2001:db8::0009
+aaaa-010		IN	AAAA	2001:db8::000a
+aaaa-011		IN	AAAA	2001:db8::000b
+aaaa-012		IN	AAAA	2001:db8::000c
+aaaa-013		IN	AAAA	2001:db8::000d
+aaaa-014		IN	AAAA	2001:db8::000e
+aaaa-015		IN	AAAA	2001:db8::000f
+aaaa-016		IN	AAAA	2001:db8::0010
+aaaa-017		IN	AAAA	2001:db8::0011
+aaaa-018		IN	AAAA	2001:db8::0012
+aaaa-019		IN	AAAA	2001:db8::0013
+aaaa-020		IN	AAAA	2001:db8::0014
+aaaa-021		IN	AAAA	2001:db8::0015
+aaaa-022		IN	AAAA	2001:db8::0016
+aaaa-023		IN	AAAA	2001:db8::0017
+aaaa-024		IN	AAAA	2001:db8::0018
+aaaa-025		IN	AAAA	2001:db8::0019
+aaaa-026		IN	AAAA	2001:db8::001a
+aaaa-027		IN	AAAA	2001:db8::001b
+aaaa-028		IN	AAAA	2001:db8::001c
+aaaa-029		IN	AAAA	2001:db8::001d
+aaaa-030		IN	AAAA	2001:db8::001e
+aaaa-031		IN	AAAA	2001:db8::001f
+aaaa-032		IN	AAAA	2001:db8::0020
+aaaa-033		IN	AAAA	2001:db8::0021
+aaaa-034		IN	AAAA	2001:db8::0022
+aaaa-035		IN	AAAA	2001:db8::0023
+aaaa-036		IN	AAAA	2001:db8::0024
+aaaa-037		IN	AAAA	2001:db8::0025
+aaaa-038		IN	AAAA	2001:db8::0026
+aaaa-039		IN	AAAA	2001:db8::0027
+aaaa-040		IN	AAAA	2001:db8::0028
+aaaa-041		IN	AAAA	2001:db8::0029
+aaaa-042		IN	AAAA	2001:db8::002a
+aaaa-043		IN	AAAA	2001:db8::002b
+aaaa-044		IN	AAAA	2001:db8::002c
+aaaa-045		IN	AAAA	2001:db8::002d
+aaaa-046		IN	AAAA	2001:db8::002e
+aaaa-047		IN	AAAA	2001:db8::002f
+aaaa-048		IN	AAAA	2001:db8::0030
+aaaa-049		IN	AAAA	2001:db8::0031
+aaaa-050		IN	AAAA	2001:db8::0032
+aaaa-051		IN	AAAA	2001:db8::0033
+aaaa-052		IN	AAAA	2001:db8::0034
+aaaa-053		IN	AAAA	2001:db8::0035
+aaaa-054		IN	AAAA	2001:db8::0036
+aaaa-055		IN	AAAA	2001:db8::0037
+aaaa-056		IN	AAAA	2001:db8::0038
+aaaa-057		IN	AAAA	2001:db8::0039
+aaaa-058		IN	AAAA	2001:db8::003a
+aaaa-059		IN	AAAA	2001:db8::003b
+aaaa-060		IN	AAAA	2001:db8::003c
+aaaa-061		IN	AAAA	2001:db8::003d
+aaaa-062		IN	AAAA	2001:db8::003e
+aaaa-063		IN	AAAA	2001:db8::003f
+aaaa-064		IN	AAAA	2001:db8::0040
+aaaa-065		IN	AAAA	2001:db8::0041
+aaaa-066		IN	AAAA	2001:db8::0042
+aaaa-067		IN	AAAA	2001:db8::0043
+aaaa-068		IN	AAAA	2001:db8::0044
+aaaa-069		IN	AAAA	2001:db8::0045
+aaaa-070		IN	AAAA	2001:db8::0046
+aaaa-071		IN	AAAA	2001:db8::0047
+aaaa-072		IN	AAAA	2001:db8::0048
+aaaa-073		IN	AAAA	2001:db8::0049
+aaaa-074		IN	AAAA	2001:db8::004a
+aaaa-075		IN	AAAA	2001:db8::004b
+aaaa-076		IN	AAAA	2001:db8::004c
+aaaa-077		IN	AAAA	2001:db8::004d
+aaaa-078		IN	AAAA	2001:db8::004e
+aaaa-079		IN	AAAA	2001:db8::004f
+aaaa-080		IN	AAAA	2001:db8::0050
+aaaa-081		IN	AAAA	2001:db8::0051
+aaaa-082		IN	AAAA	2001:db8::0052
+aaaa-083		IN	AAAA	2001:db8::0053
+aaaa-084		IN	AAAA	2001:db8::0054
+aaaa-085		IN	AAAA	2001:db8::0055
+aaaa-086		IN	AAAA	2001:db8::0056
+aaaa-087		IN	AAAA	2001:db8::0057
+aaaa-088		IN	AAAA	2001:db8::0058
+aaaa-089		IN	AAAA	2001:db8::0059
+aaaa-090		IN	AAAA	2001:db8::005a
+aaaa-091		IN	AAAA	2001:db8::005b
+aaaa-092		IN	AAAA	2001:db8::005c
+aaaa-093		IN	AAAA	2001:db8::005d
+aaaa-094		IN	AAAA	2001:db8::005e
+aaaa-095		IN	AAAA	2001:db8::005f
+aaaa-096		IN	AAAA	2001:db8::0060
+aaaa-097		IN	AAAA	2001:db8::0061
+aaaa-098		IN	AAAA	2001:db8::0062
+aaaa-099		IN	AAAA	2001:db8::0063
+aaaa-100		IN	AAAA	2001:db8::0064
+aaaa-101		IN	AAAA	2001:db8::0065
+aaaa-102		IN	AAAA	2001:db8::0066
+aaaa-103		IN	AAAA	2001:db8::0067
+aaaa-104		IN	AAAA	2001:db8::0068
+aaaa-105		IN	AAAA	2001:db8::0069
+aaaa-106		IN	AAAA	2001:db8::006a
+aaaa-107		IN	AAAA	2001:db8::006b
+aaaa-108		IN	AAAA	2001:db8::006c
+aaaa-109		IN	AAAA	2001:db8::006d
+aaaa-110		IN	AAAA	2001:db8::006e
+aaaa-111		IN	AAAA	2001:db8::006f
+aaaa-112		IN	AAAA	2001:db8::0070
+aaaa-113		IN	AAAA	2001:db8::0071
+aaaa-114		IN	AAAA	2001:db8::0072
+aaaa-115		IN	AAAA	2001:db8::0073
+aaaa-116		IN	AAAA	2001:db8::0074
+aaaa-117		IN	AAAA	2001:db8::0075
+aaaa-118		IN	AAAA	2001:db8::0076
+aaaa-119		IN	AAAA	2001:db8::0077
+aaaa-120		IN	AAAA	2001:db8::0078
+aaaa-121		IN	AAAA	2001:db8::0079
+aaaa-122		IN	AAAA	2001:db8::007a
+aaaa-123		IN	AAAA	2001:db8::007b
+aaaa-124		IN	AAAA	2001:db8::007c
+aaaa-125		IN	AAAA	2001:db8::007d
+aaaa-126		IN	AAAA	2001:db8::007e
+aaaa-127		IN	AAAA	2001:db8::007f
+aaaa-128		IN	AAAA	2001:db8::0080
+aaaa-129		IN	AAAA	2001:db8::0081
+aaaa-130		IN	AAAA	2001:db8::0082
+aaaa-131		IN	AAAA	2001:db8::0083
+aaaa-132		IN	AAAA	2001:db8::0084
+aaaa-133		IN	AAAA	2001:db8::0085
+aaaa-134		IN	AAAA	2001:db8::0086
+aaaa-135		IN	AAAA	2001:db8::0087
+aaaa-136		IN	AAAA	2001:db8::0088
+aaaa-137		IN	AAAA	2001:db8::0089
+aaaa-138		IN	AAAA	2001:db8::008a
+aaaa-139		IN	AAAA	2001:db8::008b
+aaaa-140		IN	AAAA	2001:db8::008c
+aaaa-141		IN	AAAA	2001:db8::008d
+aaaa-142		IN	AAAA	2001:db8::008e
+aaaa-143		IN	AAAA	2001:db8::008f
+aaaa-144		IN	AAAA	2001:db8::0090
+aaaa-145		IN	AAAA	2001:db8::0091
+aaaa-146		IN	AAAA	2001:db8::0092
+aaaa-147		IN	AAAA	2001:db8::0093
+aaaa-148		IN	AAAA	2001:db8::0094
+aaaa-149		IN	AAAA	2001:db8::0095
+aaaa-150		IN	AAAA	2001:db8::0096
+aaaa-151		IN	AAAA	2001:db8::0097
+aaaa-152		IN	AAAA	2001:db8::0098
+aaaa-153		IN	AAAA	2001:db8::0099
+aaaa-154		IN	AAAA	2001:db8::009a
+aaaa-155		IN	AAAA	2001:db8::009b
+aaaa-156		IN	AAAA	2001:db8::009c
+aaaa-157		IN	AAAA	2001:db8::009d
+aaaa-158		IN	AAAA	2001:db8::009e
+aaaa-159		IN	AAAA	2001:db8::009f
+aaaa-160		IN	AAAA	2001:db8::00a0
+aaaa-161		IN	AAAA	2001:db8::00a1
+aaaa-162		IN	AAAA	2001:db8::00a2
+aaaa-163		IN	AAAA	2001:db8::00a3
+aaaa-164		IN	AAAA	2001:db8::00a4
+aaaa-165		IN	AAAA	2001:db8::00a5
+aaaa-166		IN	AAAA	2001:db8::00a6
+aaaa-167		IN	AAAA	2001:db8::00a7
+aaaa-168		IN	AAAA	2001:db8::00a8
+aaaa-169		IN	AAAA	2001:db8::00a9
+aaaa-170		IN	AAAA	2001:db8::00aa
+aaaa-171		IN	AAAA	2001:db8::00ab
+aaaa-172		IN	AAAA	2001:db8::00ac
+aaaa-173		IN	AAAA	2001:db8::00ad
+aaaa-174		IN	AAAA	2001:db8::00ae
+aaaa-175		IN	AAAA	2001:db8::00af
+aaaa-176		IN	AAAA	2001:db8::00b0
+aaaa-177		IN	AAAA	2001:db8::00b1
+aaaa-178		IN	AAAA	2001:db8::00b2
+aaaa-179		IN	AAAA	2001:db8::00b3
+aaaa-180		IN	AAAA	2001:db8::00b4
+aaaa-181		IN	AAAA	2001:db8::00b5
+aaaa-182		IN	AAAA	2001:db8::00b6
+aaaa-183		IN	AAAA	2001:db8::00b7
+aaaa-184		IN	AAAA	2001:db8::00b8
+aaaa-185		IN	AAAA	2001:db8::00b9
+aaaa-186		IN	AAAA	2001:db8::00ba
+aaaa-187		IN	AAAA	2001:db8::00bb
+aaaa-188		IN	AAAA	2001:db8::00bc
+aaaa-189		IN	AAAA	2001:db8::00bd
+aaaa-190		IN	AAAA	2001:db8::00be
+aaaa-191		IN	AAAA	2001:db8::00bf
+aaaa-192		IN	AAAA	2001:db8::00c0
+aaaa-193		IN	AAAA	2001:db8::00c1
+aaaa-194		IN	AAAA	2001:db8::00c2
+aaaa-195		IN	AAAA	2001:db8::00c3
+aaaa-196		IN	AAAA	2001:db8::00c4
+aaaa-197		IN	AAAA	2001:db8::00c5
+aaaa-198		IN	AAAA	2001:db8::00c6
+aaaa-199		IN	AAAA	2001:db8::00c7
+aaaa-200		IN	AAAA	2001:db8::00c8
+aaaa-201		IN	AAAA	2001:db8::00c9
+aaaa-202		IN	AAAA	2001:db8::00ca
+aaaa-203		IN	AAAA	2001:db8::00cb
+aaaa-204		IN	AAAA	2001:db8::00cc
+aaaa-205		IN	AAAA	2001:db8::00cd
+aaaa-206		IN	AAAA	2001:db8::00ce
+aaaa-207		IN	AAAA	2001:db8::00cf
+aaaa-208		IN	AAAA	2001:db8::00d0
+aaaa-209		IN	AAAA	2001:db8::00d1
+aaaa-210		IN	AAAA	2001:db8::00d2
+aaaa-211		IN	AAAA	2001:db8::00d3
+aaaa-212		IN	AAAA	2001:db8::00d4
+aaaa-213		IN	AAAA	2001:db8::00d5
+aaaa-214		IN	AAAA	2001:db8::00d6
+aaaa-215		IN	AAAA	2001:db8::00d7
+aaaa-216		IN	AAAA	2001:db8::00d8
+aaaa-217		IN	AAAA	2001:db8::00d9
+aaaa-218		IN	AAAA	2001:db8::00da
+aaaa-219		IN	AAAA	2001:db8::00db
+aaaa-220		IN	AAAA	2001:db8::00dc
+aaaa-221		IN	AAAA	2001:db8::00dd
+aaaa-222		IN	AAAA	2001:db8::00de
+aaaa-223		IN	AAAA	2001:db8::00df
+aaaa-224		IN	AAAA	2001:db8::00e0
+aaaa-225		IN	AAAA	2001:db8::00e1
+aaaa-226		IN	AAAA	2001:db8::00e2
+aaaa-227		IN	AAAA	2001:db8::00e3
+aaaa-228		IN	AAAA	2001:db8::00e4
+aaaa-229		IN	AAAA	2001:db8::00e5
+aaaa-230		IN	AAAA	2001:db8::00e6
+aaaa-231		IN	AAAA	2001:db8::00e7
+aaaa-232		IN	AAAA	2001:db8::00e8
+aaaa-233		IN	AAAA	2001:db8::00e9
+aaaa-234		IN	AAAA	2001:db8::00ea
+aaaa-235		IN	AAAA	2001:db8::00eb
+aaaa-236		IN	AAAA	2001:db8::00ec
+aaaa-237		IN	AAAA	2001:db8::00ed
+aaaa-238		IN	AAAA	2001:db8::00ee
+aaaa-239		IN	AAAA	2001:db8::00ef
+aaaa-240		IN	AAAA	2001:db8::00f0
+aaaa-241		IN	AAAA	2001:db8::00f1
+aaaa-242		IN	AAAA	2001:db8::00f2
+aaaa-243		IN	AAAA	2001:db8::00f3
+aaaa-244		IN	AAAA	2001:db8::00f4
+aaaa-245		IN	AAAA	2001:db8::00f5
+aaaa-246		IN	AAAA	2001:db8::00f6
+aaaa-247		IN	AAAA	2001:db8::00f7
+aaaa-248		IN	AAAA	2001:db8::00f8
+aaaa-249		IN	AAAA	2001:db8::00f9
+aaaa-250		IN	AAAA	2001:db8::00fa
+aaaa-251		IN	AAAA	2001:db8::00fb
+aaaa-252		IN	AAAA	2001:db8::00fc
+aaaa-253		IN	AAAA	2001:db8::00fd
+aaaa-254		IN	AAAA	2001:db8::00fe
+aaaa-255		IN	AAAA	2001:db8::00ff
+aaaa-256		IN	AAAA	2001:db8::0100
+aaaa-257		IN	AAAA	2001:db8::0101
+aaaa-258		IN	AAAA	2001:db8::0102
+aaaa-259		IN	AAAA	2001:db8::0103
+aaaa-260		IN	AAAA	2001:db8::0104
+aaaa-261		IN	AAAA	2001:db8::0105
+aaaa-262		IN	AAAA	2001:db8::0106
+aaaa-263		IN	AAAA	2001:db8::0107
+aaaa-264		IN	AAAA	2001:db8::0108
+aaaa-265		IN	AAAA	2001:db8::0109
+aaaa-266		IN	AAAA	2001:db8::010a
+aaaa-267		IN	AAAA	2001:db8::010b
+aaaa-268		IN	AAAA	2001:db8::010c
+aaaa-269		IN	AAAA	2001:db8::010d
+aaaa-270		IN	AAAA	2001:db8::010e
+aaaa-271		IN	AAAA	2001:db8::010f
+aaaa-272		IN	AAAA	2001:db8::0110
+aaaa-273		IN	AAAA	2001:db8::0111
+aaaa-274		IN	AAAA	2001:db8::0112
+aaaa-275		IN	AAAA	2001:db8::0113
+aaaa-276		IN	AAAA	2001:db8::0114
+aaaa-277		IN	AAAA	2001:db8::0115
+aaaa-278		IN	AAAA	2001:db8::0116
+aaaa-279		IN	AAAA	2001:db8::0117
+aaaa-280		IN	AAAA	2001:db8::0118
+aaaa-281		IN	AAAA	2001:db8::0119
+aaaa-282		IN	AAAA	2001:db8::011a
+aaaa-283		IN	AAAA	2001:db8::011b
+aaaa-284		IN	AAAA	2001:db8::011c
+aaaa-285		IN	AAAA	2001:db8::011d
+aaaa-286		IN	AAAA	2001:db8::011e
+aaaa-287		IN	AAAA	2001:db8::011f
+aaaa-288		IN	AAAA	2001:db8::0120
+aaaa-289		IN	AAAA	2001:db8::0121
+aaaa-290		IN	AAAA	2001:db8::0122
+aaaa-291		IN	AAAA	2001:db8::0123
+aaaa-292		IN	AAAA	2001:db8::0124
+aaaa-293		IN	AAAA	2001:db8::0125
+aaaa-294		IN	AAAA	2001:db8::0126
+aaaa-295		IN	AAAA	2001:db8::0127
+aaaa-296		IN	AAAA	2001:db8::0128
+aaaa-297		IN	AAAA	2001:db8::0129
+aaaa-298		IN	AAAA	2001:db8::012a
+aaaa-299		IN	AAAA	2001:db8::012b
+aaaa-300		IN	AAAA	2001:db8::012c
+aaaa-301		IN	AAAA	2001:db8::012d
+aaaa-302		IN	AAAA	2001:db8::012e
+aaaa-303		IN	AAAA	2001:db8::012f
+aaaa-304		IN	AAAA	2001:db8::0130
+aaaa-305		IN	AAAA	2001:db8::0131
+aaaa-306		IN	AAAA	2001:db8::0132
+aaaa-307		IN	AAAA	2001:db8::0133
+aaaa-308		IN	AAAA	2001:db8::0134
+aaaa-309		IN	AAAA	2001:db8::0135
+aaaa-310		IN	AAAA	2001:db8::0136
+aaaa-311		IN	AAAA	2001:db8::0137
+aaaa-312		IN	AAAA	2001:db8::0138
+aaaa-313		IN	AAAA	2001:db8::0139
+aaaa-314		IN	AAAA	2001:db8::013a
+aaaa-315		IN	AAAA	2001:db8::013b
+aaaa-316		IN	AAAA	2001:db8::013c
+aaaa-317		IN	AAAA	2001:db8::013d
+aaaa-318		IN	AAAA	2001:db8::013e
+aaaa-319		IN	AAAA	2001:db8::013f
+aaaa-320		IN	AAAA	2001:db8::0140
+aaaa-321		IN	AAAA	2001:db8::0141
+aaaa-322		IN	AAAA	2001:db8::0142
+aaaa-323		IN	AAAA	2001:db8::0143
+aaaa-324		IN	AAAA	2001:db8::0144
+aaaa-325		IN	AAAA	2001:db8::0145
+aaaa-326		IN	AAAA	2001:db8::0146
+aaaa-327		IN	AAAA	2001:db8::0147
+aaaa-328		IN	AAAA	2001:db8::0148
+aaaa-329		IN	AAAA	2001:db8::0149
+aaaa-330		IN	AAAA	2001:db8::014a
+aaaa-331		IN	AAAA	2001:db8::014b
+aaaa-332		IN	AAAA	2001:db8::014c
+aaaa-333		IN	AAAA	2001:db8::014d
+aaaa-334		IN	AAAA	2001:db8::014e
+aaaa-335		IN	AAAA	2001:db8::014f
+aaaa-336		IN	AAAA	2001:db8::0150
+aaaa-337		IN	AAAA	2001:db8::0151
+aaaa-338		IN	AAAA	2001:db8::0152
+aaaa-339		IN	AAAA	2001:db8::0153
+aaaa-340		IN	AAAA	2001:db8::0154
+aaaa-341		IN	AAAA	2001:db8::0155
+aaaa-342		IN	AAAA	2001:db8::0156
+aaaa-343		IN	AAAA	2001:db8::0157
+aaaa-344		IN	AAAA	2001:db8::0158
+aaaa-345		IN	AAAA	2001:db8::0159
+aaaa-346		IN	AAAA	2001:db8::015a
+aaaa-347		IN	AAAA	2001:db8::015b
+aaaa-348		IN	AAAA	2001:db8::015c
+aaaa-349		IN	AAAA	2001:db8::015d
+aaaa-350		IN	AAAA	2001:db8::015e
+aaaa-351		IN	AAAA	2001:db8::015f
+aaaa-352		IN	AAAA	2001:db8::0160
+aaaa-353		IN	AAAA	2001:db8::0161
+aaaa-354		IN	AAAA	2001:db8::0162
+aaaa-355		IN	AAAA	2001:db8::0163
+aaaa-356		IN	AAAA	2001:db8::0164
+aaaa-357		IN	AAAA	2001:db8::0165
+aaaa-358		IN	AAAA	2001:db8::0166
+aaaa-359		IN	AAAA	2001:db8::0167
+aaaa-360		IN	AAAA	2001:db8::0168
+aaaa-361		IN	AAAA	2001:db8::0169
+aaaa-362		IN	AAAA	2001:db8::016a
+aaaa-363		IN	AAAA	2001:db8::016b
+aaaa-364		IN	AAAA	2001:db8::016c
+aaaa-365		IN	AAAA	2001:db8::016d
+aaaa-366		IN	AAAA	2001:db8::016e
+aaaa-367		IN	AAAA	2001:db8::016f
+aaaa-368		IN	AAAA	2001:db8::0170
+aaaa-369		IN	AAAA	2001:db8::0171
+aaaa-370		IN	AAAA	2001:db8::0172
+aaaa-371		IN	AAAA	2001:db8::0173
+aaaa-372		IN	AAAA	2001:db8::0174
+aaaa-373		IN	AAAA	2001:db8::0175
+aaaa-374		IN	AAAA	2001:db8::0176
+aaaa-375		IN	AAAA	2001:db8::0177
+aaaa-376		IN	AAAA	2001:db8::0178
+aaaa-377		IN	AAAA	2001:db8::0179
+aaaa-378		IN	AAAA	2001:db8::017a
+aaaa-379		IN	AAAA	2001:db8::017b
+aaaa-380		IN	AAAA	2001:db8::017c
+aaaa-381		IN	AAAA	2001:db8::017d
+aaaa-382		IN	AAAA	2001:db8::017e
+aaaa-383		IN	AAAA	2001:db8::017f
+aaaa-384		IN	AAAA	2001:db8::0180
+aaaa-385		IN	AAAA	2001:db8::0181
+aaaa-386		IN	AAAA	2001:db8::0182
+aaaa-387		IN	AAAA	2001:db8::0183
+aaaa-388		IN	AAAA	2001:db8::0184
+aaaa-389		IN	AAAA	2001:db8::0185
+aaaa-390		IN	AAAA	2001:db8::0186
+aaaa-391		IN	AAAA	2001:db8::0187
+aaaa-392		IN	AAAA	2001:db8::0188
+aaaa-393		IN	AAAA	2001:db8::0189
+aaaa-394		IN	AAAA	2001:db8::018a
+aaaa-395		IN	AAAA	2001:db8::018b
+aaaa-396		IN	AAAA	2001:db8::018c
+aaaa-397		IN	AAAA	2001:db8::018d
+aaaa-398		IN	AAAA	2001:db8::018e
+aaaa-399		IN	AAAA	2001:db8::018f
+aaaa-400		IN	AAAA	2001:db8::0190
+aaaa-401		IN	AAAA	2001:db8::0191
+aaaa-402		IN	AAAA	2001:db8::0192
+aaaa-403		IN	AAAA	2001:db8::0193
+aaaa-404		IN	AAAA	2001:db8::0194
+aaaa-405		IN	AAAA	2001:db8::0195
+aaaa-406		IN	AAAA	2001:db8::0196
+aaaa-407		IN	AAAA	2001:db8::0197
+aaaa-408		IN	AAAA	2001:db8::0198
+aaaa-409		IN	AAAA	2001:db8::0199
+aaaa-410		IN	AAAA	2001:db8::019a
+aaaa-411		IN	AAAA	2001:db8::019b
+aaaa-412		IN	AAAA	2001:db8::019c
+aaaa-413		IN	AAAA	2001:db8::019d
+aaaa-414		IN	AAAA	2001:db8::019e
+aaaa-415		IN	AAAA	2001:db8::019f
+aaaa-416		IN	AAAA	2001:db8::01a0
+aaaa-417		IN	AAAA	2001:db8::01a1
+aaaa-418		IN	AAAA	2001:db8::01a2
+aaaa-419		IN	AAAA	2001:db8::01a3
+aaaa-420		IN	AAAA	2001:db8::01a4
+aaaa-421		IN	AAAA	2001:db8::01a5
+aaaa-422		IN	AAAA	2001:db8::01a6
+aaaa-423		IN	AAAA	2001:db8::01a7
+aaaa-424		IN	AAAA	2001:db8::01a8
+aaaa-425		IN	AAAA	2001:db8::01a9
+aaaa-426		IN	AAAA	2001:db8::01aa
+aaaa-427		IN	AAAA	2001:db8::01ab
+aaaa-428		IN	AAAA	2001:db8::01ac
+aaaa-429		IN	AAAA	2001:db8::01ad
+aaaa-430		IN	AAAA	2001:db8::01ae
+aaaa-431		IN	AAAA	2001:db8::01af
+aaaa-432		IN	AAAA	2001:db8::01b0
+aaaa-433		IN	AAAA	2001:db8::01b1
+aaaa-434		IN	AAAA	2001:db8::01b2
+aaaa-435		IN	AAAA	2001:db8::01b3
+aaaa-436		IN	AAAA	2001:db8::01b4
+aaaa-437		IN	AAAA	2001:db8::01b5
+aaaa-438		IN	AAAA	2001:db8::01b6
+aaaa-439		IN	AAAA	2001:db8::01b7
+aaaa-440		IN	AAAA	2001:db8::01b8
+aaaa-441		IN	AAAA	2001:db8::01b9
+aaaa-442		IN	AAAA	2001:db8::01ba
+aaaa-443		IN	AAAA	2001:db8::01bb
+aaaa-444		IN	AAAA	2001:db8::01bc
+aaaa-445		IN	AAAA	2001:db8::01bd
+aaaa-446		IN	AAAA	2001:db8::01be
+aaaa-447		IN	AAAA	2001:db8::01bf
+aaaa-448		IN	AAAA	2001:db8::01c0
+aaaa-449		IN	AAAA	2001:db8::01c1
+aaaa-450		IN	AAAA	2001:db8::01c2
+aaaa-451		IN	AAAA	2001:db8::01c3
+aaaa-452		IN	AAAA	2001:db8::01c4
+aaaa-453		IN	AAAA	2001:db8::01c5
+aaaa-454		IN	AAAA	2001:db8::01c6
+aaaa-455		IN	AAAA	2001:db8::01c7
+aaaa-456		IN	AAAA	2001:db8::01c8
+aaaa-457		IN	AAAA	2001:db8::01c9
+aaaa-458		IN	AAAA	2001:db8::01ca
+aaaa-459		IN	AAAA	2001:db8::01cb
+aaaa-460		IN	AAAA	2001:db8::01cc
+aaaa-461		IN	AAAA	2001:db8::01cd
+aaaa-462		IN	AAAA	2001:db8::01ce
+aaaa-463		IN	AAAA	2001:db8::01cf
+aaaa-464		IN	AAAA	2001:db8::01d0
+aaaa-465		IN	AAAA	2001:db8::01d1
+aaaa-466		IN	AAAA	2001:db8::01d2
+aaaa-467		IN	AAAA	2001:db8::01d3
+aaaa-468		IN	AAAA	2001:db8::01d4
+aaaa-469		IN	AAAA	2001:db8::01d5
+aaaa-470		IN	AAAA	2001:db8::01d6
+aaaa-471		IN	AAAA	2001:db8::01d7
+aaaa-472		IN	AAAA	2001:db8::01d8
+aaaa-473		IN	AAAA	2001:db8::01d9
+aaaa-474		IN	AAAA	2001:db8::01da
+aaaa-475		IN	AAAA	2001:db8::01db
+aaaa-476		IN	AAAA	2001:db8::01dc
+aaaa-477		IN	AAAA	2001:db8::01dd
+aaaa-478		IN	AAAA	2001:db8::01de
+aaaa-479		IN	AAAA	2001:db8::01df
+aaaa-480		IN	AAAA	2001:db8::01e0
+aaaa-481		IN	AAAA	2001:db8::01e1
+aaaa-482		IN	AAAA	2001:db8::01e2
+aaaa-483		IN	AAAA	2001:db8::01e3
+aaaa-484		IN	AAAA	2001:db8::01e4
+aaaa-485		IN	AAAA	2001:db8::01e5
+aaaa-486		IN	AAAA	2001:db8::01e6
+aaaa-487		IN	AAAA	2001:db8::01e7
+aaaa-488		IN	AAAA	2001:db8::01e8
+aaaa-489		IN	AAAA	2001:db8::01e9
+aaaa-490		IN	AAAA	2001:db8::01ea
+aaaa-491		IN	AAAA	2001:db8::01eb
+aaaa-492		IN	AAAA	2001:db8::01ec
+aaaa-493		IN	AAAA	2001:db8::01ed
+aaaa-494		IN	AAAA	2001:db8::01ee
+aaaa-495		IN	AAAA	2001:db8::01ef
+aaaa-496		IN	AAAA	2001:db8::01f0
+aaaa-497		IN	AAAA	2001:db8::01f1
+aaaa-498		IN	AAAA	2001:db8::01f2
+aaaa-499		IN	AAAA	2001:db8::01f3
+aaaa-500		IN	AAAA	2001:db8::01f4
+aaaa-501		IN	AAAA	2001:db8::01f5
+aaaa-502		IN	AAAA	2001:db8::01f6
+aaaa-503		IN	AAAA	2001:db8::01f7
+aaaa-504		IN	AAAA	2001:db8::01f8
+aaaa-505		IN	AAAA	2001:db8::01f9
+aaaa-506		IN	AAAA	2001:db8::01fa
+aaaa-507		IN	AAAA	2001:db8::01fb
+aaaa-508		IN	AAAA	2001:db8::01fc
+aaaa-509		IN	AAAA	2001:db8::01fd
+aaaa-510		IN	AAAA	2001:db8::01fe
+aaaa-511		IN	AAAA	2001:db8::01ff
+
+bbbb-000		IN	AAAA	2001:db8::1:0000
+bbbb-001		IN	AAAA	2001:db8::1:0001
+bbbb-002		IN	AAAA	2001:db8::1:0002
+bbbb-003		IN	AAAA	2001:db8::1:0003
+bbbb-004		IN	AAAA	2001:db8::1:0004
+bbbb-005		IN	AAAA	2001:db8::1:0005
+bbbb-006		IN	AAAA	2001:db8::1:0006
+bbbb-007		IN	AAAA	2001:db8::1:0007
+bbbb-008		IN	AAAA	2001:db8::1:0008
+bbbb-009		IN	AAAA	2001:db8::1:0009
+bbbb-010		IN	AAAA	2001:db8::1:000a
+bbbb-011		IN	AAAA	2001:db8::1:000b
+bbbb-012		IN	AAAA	2001:db8::1:000c
+bbbb-013		IN	AAAA	2001:db8::1:000d
+bbbb-014		IN	AAAA	2001:db8::1:000e
+bbbb-015		IN	AAAA	2001:db8::1:000f
+bbbb-016		IN	AAAA	2001:db8::1:0010
+bbbb-017		IN	AAAA	2001:db8::1:0011
+bbbb-018		IN	AAAA	2001:db8::1:0012
+bbbb-019		IN	AAAA	2001:db8::1:0013
+bbbb-020		IN	AAAA	2001:db8::1:0014
+bbbb-021		IN	AAAA	2001:db8::1:0015
+bbbb-022		IN	AAAA	2001:db8::1:0016
+bbbb-023		IN	AAAA	2001:db8::1:0017
+bbbb-024		IN	AAAA	2001:db8::1:0018
+bbbb-025		IN	AAAA	2001:db8::1:0019
+bbbb-026		IN	AAAA	2001:db8::1:001a
+bbbb-027		IN	AAAA	2001:db8::1:001b
+bbbb-028		IN	AAAA	2001:db8::1:001c
+bbbb-029		IN	AAAA	2001:db8::1:001d
+bbbb-030		IN	AAAA	2001:db8::1:001e
+bbbb-031		IN	AAAA	2001:db8::1:001f
+bbbb-032		IN	AAAA	2001:db8::1:0020
+bbbb-033		IN	AAAA	2001:db8::1:0021
+bbbb-034		IN	AAAA	2001:db8::1:0022
+bbbb-035		IN	AAAA	2001:db8::1:0023
+bbbb-036		IN	AAAA	2001:db8::1:0024
+bbbb-037		IN	AAAA	2001:db8::1:0025
+bbbb-038		IN	AAAA	2001:db8::1:0026
+bbbb-039		IN	AAAA	2001:db8::1:0027
+bbbb-040		IN	AAAA	2001:db8::1:0028
+bbbb-041		IN	AAAA	2001:db8::1:0029
+bbbb-042		IN	AAAA	2001:db8::1:002a
+bbbb-043		IN	AAAA	2001:db8::1:002b
+bbbb-044		IN	AAAA	2001:db8::1:002c
+bbbb-045		IN	AAAA	2001:db8::1:002d
+bbbb-046		IN	AAAA	2001:db8::1:002e
+bbbb-047		IN	AAAA	2001:db8::1:002f
+bbbb-048		IN	AAAA	2001:db8::1:0030
+bbbb-049		IN	AAAA	2001:db8::1:0031
+bbbb-050		IN	AAAA	2001:db8::1:0032
+bbbb-051		IN	AAAA	2001:db8::1:0033
+bbbb-052		IN	AAAA	2001:db8::1:0034
+bbbb-053		IN	AAAA	2001:db8::1:0035
+bbbb-054		IN	AAAA	2001:db8::1:0036
+bbbb-055		IN	AAAA	2001:db8::1:0037
+bbbb-056		IN	AAAA	2001:db8::1:0038
+bbbb-057		IN	AAAA	2001:db8::1:0039
+bbbb-058		IN	AAAA	2001:db8::1:003a
+bbbb-059		IN	AAAA	2001:db8::1:003b
+bbbb-060		IN	AAAA	2001:db8::1:003c
+bbbb-061		IN	AAAA	2001:db8::1:003d
+bbbb-062		IN	AAAA	2001:db8::1:003e
+bbbb-063		IN	AAAA	2001:db8::1:003f
+bbbb-064		IN	AAAA	2001:db8::1:0040
+bbbb-065		IN	AAAA	2001:db8::1:0041
+bbbb-066		IN	AAAA	2001:db8::1:0042
+bbbb-067		IN	AAAA	2001:db8::1:0043
+bbbb-068		IN	AAAA	2001:db8::1:0044
+bbbb-069		IN	AAAA	2001:db8::1:0045
+bbbb-070		IN	AAAA	2001:db8::1:0046
+bbbb-071		IN	AAAA	2001:db8::1:0047
+bbbb-072		IN	AAAA	2001:db8::1:0048
+bbbb-073		IN	AAAA	2001:db8::1:0049
+bbbb-074		IN	AAAA	2001:db8::1:004a
+bbbb-075		IN	AAAA	2001:db8::1:004b
+bbbb-076		IN	AAAA	2001:db8::1:004c
+bbbb-077		IN	AAAA	2001:db8::1:004d
+bbbb-078		IN	AAAA	2001:db8::1:004e
+bbbb-079		IN	AAAA	2001:db8::1:004f
+bbbb-080		IN	AAAA	2001:db8::1:0050
+bbbb-081		IN	AAAA	2001:db8::1:0051
+bbbb-082		IN	AAAA	2001:db8::1:0052
+bbbb-083		IN	AAAA	2001:db8::1:0053
+bbbb-084		IN	AAAA	2001:db8::1:0054
+bbbb-085		IN	AAAA	2001:db8::1:0055
+bbbb-086		IN	AAAA	2001:db8::1:0056
+bbbb-087		IN	AAAA	2001:db8::1:0057
+bbbb-088		IN	AAAA	2001:db8::1:0058
+bbbb-089		IN	AAAA	2001:db8::1:0059
+bbbb-090		IN	AAAA	2001:db8::1:005a
+bbbb-091		IN	AAAA	2001:db8::1:005b
+bbbb-092		IN	AAAA	2001:db8::1:005c
+bbbb-093		IN	AAAA	2001:db8::1:005d
+bbbb-094		IN	AAAA	2001:db8::1:005e
+bbbb-095		IN	AAAA	2001:db8::1:005f
+bbbb-096		IN	AAAA	2001:db8::1:0060
+bbbb-097		IN	AAAA	2001:db8::1:0061
+bbbb-098		IN	AAAA	2001:db8::1:0062
+bbbb-099		IN	AAAA	2001:db8::1:0063
+bbbb-100		IN	AAAA	2001:db8::1:0064
+bbbb-101		IN	AAAA	2001:db8::1:0065
+bbbb-102		IN	AAAA	2001:db8::1:0066
+bbbb-103		IN	AAAA	2001:db8::1:0067
+bbbb-104		IN	AAAA	2001:db8::1:0068
+bbbb-105		IN	AAAA	2001:db8::1:0069
+bbbb-106		IN	AAAA	2001:db8::1:006a
+bbbb-107		IN	AAAA	2001:db8::1:006b
+bbbb-108		IN	AAAA	2001:db8::1:006c
+bbbb-109		IN	AAAA	2001:db8::1:006d
+bbbb-110		IN	AAAA	2001:db8::1:006e
+bbbb-111		IN	AAAA	2001:db8::1:006f
+bbbb-112		IN	AAAA	2001:db8::1:0070
+bbbb-113		IN	AAAA	2001:db8::1:0071
+bbbb-114		IN	AAAA	2001:db8::1:0072
+bbbb-115		IN	AAAA	2001:db8::1:0073
+bbbb-116		IN	AAAA	2001:db8::1:0074
+bbbb-117		IN	AAAA	2001:db8::1:0075
+bbbb-118		IN	AAAA	2001:db8::1:0076
+bbbb-119		IN	AAAA	2001:db8::1:0077
+bbbb-120		IN	AAAA	2001:db8::1:0078
+bbbb-121		IN	AAAA	2001:db8::1:0079
+bbbb-122		IN	AAAA	2001:db8::1:007a
+bbbb-123		IN	AAAA	2001:db8::1:007b
+bbbb-124		IN	AAAA	2001:db8::1:007c
+bbbb-125		IN	AAAA	2001:db8::1:007d
+bbbb-126		IN	AAAA	2001:db8::1:007e
+bbbb-127		IN	AAAA	2001:db8::1:007f
+bbbb-128		IN	AAAA	2001:db8::1:0080
+bbbb-129		IN	AAAA	2001:db8::1:0081
+bbbb-130		IN	AAAA	2001:db8::1:0082
+bbbb-131		IN	AAAA	2001:db8::1:0083
+bbbb-132		IN	AAAA	2001:db8::1:0084
+bbbb-133		IN	AAAA	2001:db8::1:0085
+bbbb-134		IN	AAAA	2001:db8::1:0086
+bbbb-135		IN	AAAA	2001:db8::1:0087
+bbbb-136		IN	AAAA	2001:db8::1:0088
+bbbb-137		IN	AAAA	2001:db8::1:0089
+bbbb-138		IN	AAAA	2001:db8::1:008a
+bbbb-139		IN	AAAA	2001:db8::1:008b
+bbbb-140		IN	AAAA	2001:db8::1:008c
+bbbb-141		IN	AAAA	2001:db8::1:008d
+bbbb-142		IN	AAAA	2001:db8::1:008e
+bbbb-143		IN	AAAA	2001:db8::1:008f
+bbbb-144		IN	AAAA	2001:db8::1:0090
+bbbb-145		IN	AAAA	2001:db8::1:0091
+bbbb-146		IN	AAAA	2001:db8::1:0092
+bbbb-147		IN	AAAA	2001:db8::1:0093
+bbbb-148		IN	AAAA	2001:db8::1:0094
+bbbb-149		IN	AAAA	2001:db8::1:0095
+bbbb-150		IN	AAAA	2001:db8::1:0096
+bbbb-151		IN	AAAA	2001:db8::1:0097
+bbbb-152		IN	AAAA	2001:db8::1:0098
+bbbb-153		IN	AAAA	2001:db8::1:0099
+bbbb-154		IN	AAAA	2001:db8::1:009a
+bbbb-155		IN	AAAA	2001:db8::1:009b
+bbbb-156		IN	AAAA	2001:db8::1:009c
+bbbb-157		IN	AAAA	2001:db8::1:009d
+bbbb-158		IN	AAAA	2001:db8::1:009e
+bbbb-159		IN	AAAA	2001:db8::1:009f
+bbbb-160		IN	AAAA	2001:db8::1:00a0
+bbbb-161		IN	AAAA	2001:db8::1:00a1
+bbbb-162		IN	AAAA	2001:db8::1:00a2
+bbbb-163		IN	AAAA	2001:db8::1:00a3
+bbbb-164		IN	AAAA	2001:db8::1:00a4
+bbbb-165		IN	AAAA	2001:db8::1:00a5
+bbbb-166		IN	AAAA	2001:db8::1:00a6
+bbbb-167		IN	AAAA	2001:db8::1:00a7
+bbbb-168		IN	AAAA	2001:db8::1:00a8
+bbbb-169		IN	AAAA	2001:db8::1:00a9
+bbbb-170		IN	AAAA	2001:db8::1:00aa
+bbbb-171		IN	AAAA	2001:db8::1:00ab
+bbbb-172		IN	AAAA	2001:db8::1:00ac
+bbbb-173		IN	AAAA	2001:db8::1:00ad
+bbbb-174		IN	AAAA	2001:db8::1:00ae
+bbbb-175		IN	AAAA	2001:db8::1:00af
+bbbb-176		IN	AAAA	2001:db8::1:00b0
+bbbb-177		IN	AAAA	2001:db8::1:00b1
+bbbb-178		IN	AAAA	2001:db8::1:00b2
+bbbb-179		IN	AAAA	2001:db8::1:00b3
+bbbb-180		IN	AAAA	2001:db8::1:00b4
+bbbb-181		IN	AAAA	2001:db8::1:00b5
+bbbb-182		IN	AAAA	2001:db8::1:00b6
+bbbb-183		IN	AAAA	2001:db8::1:00b7
+bbbb-184		IN	AAAA	2001:db8::1:00b8
+bbbb-185		IN	AAAA	2001:db8::1:00b9
+bbbb-186		IN	AAAA	2001:db8::1:00ba
+bbbb-187		IN	AAAA	2001:db8::1:00bb
+bbbb-188		IN	AAAA	2001:db8::1:00bc
+bbbb-189		IN	AAAA	2001:db8::1:00bd
+bbbb-190		IN	AAAA	2001:db8::1:00be
+bbbb-191		IN	AAAA	2001:db8::1:00bf
+bbbb-192		IN	AAAA	2001:db8::1:00c0
+bbbb-193		IN	AAAA	2001:db8::1:00c1
+bbbb-194		IN	AAAA	2001:db8::1:00c2
+bbbb-195		IN	AAAA	2001:db8::1:00c3
+bbbb-196		IN	AAAA	2001:db8::1:00c4
+bbbb-197		IN	AAAA	2001:db8::1:00c5
+bbbb-198		IN	AAAA	2001:db8::1:00c6
+bbbb-199		IN	AAAA	2001:db8::1:00c7
+bbbb-200		IN	AAAA	2001:db8::1:00c8
+bbbb-201		IN	AAAA	2001:db8::1:00c9
+bbbb-202		IN	AAAA	2001:db8::1:00ca
+bbbb-203		IN	AAAA	2001:db8::1:00cb
+bbbb-204		IN	AAAA	2001:db8::1:00cc
+bbbb-205		IN	AAAA	2001:db8::1:00cd
+bbbb-206		IN	AAAA	2001:db8::1:00ce
+bbbb-207		IN	AAAA	2001:db8::1:00cf
+bbbb-208		IN	AAAA	2001:db8::1:00d0
+bbbb-209		IN	AAAA	2001:db8::1:00d1
+bbbb-210		IN	AAAA	2001:db8::1:00d2
+bbbb-211		IN	AAAA	2001:db8::1:00d3
+bbbb-212		IN	AAAA	2001:db8::1:00d4
+bbbb-213		IN	AAAA	2001:db8::1:00d5
+bbbb-214		IN	AAAA	2001:db8::1:00d6
+bbbb-215		IN	AAAA	2001:db8::1:00d7
+bbbb-216		IN	AAAA	2001:db8::1:00d8
+bbbb-217		IN	AAAA	2001:db8::1:00d9
+bbbb-218		IN	AAAA	2001:db8::1:00da
+bbbb-219		IN	AAAA	2001:db8::1:00db
+bbbb-220		IN	AAAA	2001:db8::1:00dc
+bbbb-221		IN	AAAA	2001:db8::1:00dd
+bbbb-222		IN	AAAA	2001:db8::1:00de
+bbbb-223		IN	AAAA	2001:db8::1:00df
+bbbb-224		IN	AAAA	2001:db8::1:00e0
+bbbb-225		IN	AAAA	2001:db8::1:00e1
+bbbb-226		IN	AAAA	2001:db8::1:00e2
+bbbb-227		IN	AAAA	2001:db8::1:00e3
+bbbb-228		IN	AAAA	2001:db8::1:00e4
+bbbb-229		IN	AAAA	2001:db8::1:00e5
+bbbb-230		IN	AAAA	2001:db8::1:00e6
+bbbb-231		IN	AAAA	2001:db8::1:00e7
+bbbb-232		IN	AAAA	2001:db8::1:00e8
+bbbb-233		IN	AAAA	2001:db8::1:00e9
+bbbb-234		IN	AAAA	2001:db8::1:00ea
+bbbb-235		IN	AAAA	2001:db8::1:00eb
+bbbb-236		IN	AAAA	2001:db8::1:00ec
+bbbb-237		IN	AAAA	2001:db8::1:00ed
+bbbb-238		IN	AAAA	2001:db8::1:00ee
+bbbb-239		IN	AAAA	2001:db8::1:00ef
+bbbb-240		IN	AAAA	2001:db8::1:00f0
+bbbb-241		IN	AAAA	2001:db8::1:00f1
+bbbb-242		IN	AAAA	2001:db8::1:00f2
+bbbb-243		IN	AAAA	2001:db8::1:00f3
+bbbb-244		IN	AAAA	2001:db8::1:00f4
+bbbb-245		IN	AAAA	2001:db8::1:00f5
+bbbb-246		IN	AAAA	2001:db8::1:00f6
+bbbb-247		IN	AAAA	2001:db8::1:00f7
+bbbb-248		IN	AAAA	2001:db8::1:00f8
+bbbb-249		IN	AAAA	2001:db8::1:00f9
+bbbb-250		IN	AAAA	2001:db8::1:00fa
+bbbb-251		IN	AAAA	2001:db8::1:00fb
+bbbb-252		IN	AAAA	2001:db8::1:00fc
+bbbb-253		IN	AAAA	2001:db8::1:00fd
+bbbb-254		IN	AAAA	2001:db8::1:00fe
+bbbb-255		IN	AAAA	2001:db8::1:00ff
+bbbb-256		IN	AAAA	2001:db8::1:0100
+bbbb-257		IN	AAAA	2001:db8::1:0101
+bbbb-258		IN	AAAA	2001:db8::1:0102
+bbbb-259		IN	AAAA	2001:db8::1:0103
+bbbb-260		IN	AAAA	2001:db8::1:0104
+bbbb-261		IN	AAAA	2001:db8::1:0105
+bbbb-262		IN	AAAA	2001:db8::1:0106
+bbbb-263		IN	AAAA	2001:db8::1:0107
+bbbb-264		IN	AAAA	2001:db8::1:0108
+bbbb-265		IN	AAAA	2001:db8::1:0109
+bbbb-266		IN	AAAA	2001:db8::1:010a
+bbbb-267		IN	AAAA	2001:db8::1:010b
+bbbb-268		IN	AAAA	2001:db8::1:010c
+bbbb-269		IN	AAAA	2001:db8::1:010d
+bbbb-270		IN	AAAA	2001:db8::1:010e
+bbbb-271		IN	AAAA	2001:db8::1:010f
+bbbb-272		IN	AAAA	2001:db8::1:0110
+bbbb-273		IN	AAAA	2001:db8::1:0111
+bbbb-274		IN	AAAA	2001:db8::1:0112
+bbbb-275		IN	AAAA	2001:db8::1:0113
+bbbb-276		IN	AAAA	2001:db8::1:0114
+bbbb-277		IN	AAAA	2001:db8::1:0115
+bbbb-278		IN	AAAA	2001:db8::1:0116
+bbbb-279		IN	AAAA	2001:db8::1:0117
+bbbb-280		IN	AAAA	2001:db8::1:0118
+bbbb-281		IN	AAAA	2001:db8::1:0119
+bbbb-282		IN	AAAA	2001:db8::1:011a
+bbbb-283		IN	AAAA	2001:db8::1:011b
+bbbb-284		IN	AAAA	2001:db8::1:011c
+bbbb-285		IN	AAAA	2001:db8::1:011d
+bbbb-286		IN	AAAA	2001:db8::1:011e
+bbbb-287		IN	AAAA	2001:db8::1:011f
+bbbb-288		IN	AAAA	2001:db8::1:0120
+bbbb-289		IN	AAAA	2001:db8::1:0121
+bbbb-290		IN	AAAA	2001:db8::1:0122
+bbbb-291		IN	AAAA	2001:db8::1:0123
+bbbb-292		IN	AAAA	2001:db8::1:0124
+bbbb-293		IN	AAAA	2001:db8::1:0125
+bbbb-294		IN	AAAA	2001:db8::1:0126
+bbbb-295		IN	AAAA	2001:db8::1:0127
+bbbb-296		IN	AAAA	2001:db8::1:0128
+bbbb-297		IN	AAAA	2001:db8::1:0129
+bbbb-298		IN	AAAA	2001:db8::1:012a
+bbbb-299		IN	AAAA	2001:db8::1:012b
+bbbb-300		IN	AAAA	2001:db8::1:012c
+bbbb-301		IN	AAAA	2001:db8::1:012d
+bbbb-302		IN	AAAA	2001:db8::1:012e
+bbbb-303		IN	AAAA	2001:db8::1:012f
+bbbb-304		IN	AAAA	2001:db8::1:0130
+bbbb-305		IN	AAAA	2001:db8::1:0131
+bbbb-306		IN	AAAA	2001:db8::1:0132
+bbbb-307		IN	AAAA	2001:db8::1:0133
+bbbb-308		IN	AAAA	2001:db8::1:0134
+bbbb-309		IN	AAAA	2001:db8::1:0135
+bbbb-310		IN	AAAA	2001:db8::1:0136
+bbbb-311		IN	AAAA	2001:db8::1:0137
+bbbb-312		IN	AAAA	2001:db8::1:0138
+bbbb-313		IN	AAAA	2001:db8::1:0139
+bbbb-314		IN	AAAA	2001:db8::1:013a
+bbbb-315		IN	AAAA	2001:db8::1:013b
+bbbb-316		IN	AAAA	2001:db8::1:013c
+bbbb-317		IN	AAAA	2001:db8::1:013d
+bbbb-318		IN	AAAA	2001:db8::1:013e
+bbbb-319		IN	AAAA	2001:db8::1:013f
+bbbb-320		IN	AAAA	2001:db8::1:0140
+bbbb-321		IN	AAAA	2001:db8::1:0141
+bbbb-322		IN	AAAA	2001:db8::1:0142
+bbbb-323		IN	AAAA	2001:db8::1:0143
+bbbb-324		IN	AAAA	2001:db8::1:0144
+bbbb-325		IN	AAAA	2001:db8::1:0145
+bbbb-326		IN	AAAA	2001:db8::1:0146
+bbbb-327		IN	AAAA	2001:db8::1:0147
+bbbb-328		IN	AAAA	2001:db8::1:0148
+bbbb-329		IN	AAAA	2001:db8::1:0149
+bbbb-330		IN	AAAA	2001:db8::1:014a
+bbbb-331		IN	AAAA	2001:db8::1:014b
+bbbb-332		IN	AAAA	2001:db8::1:014c
+bbbb-333		IN	AAAA	2001:db8::1:014d
+bbbb-334		IN	AAAA	2001:db8::1:014e
+bbbb-335		IN	AAAA	2001:db8::1:014f
+bbbb-336		IN	AAAA	2001:db8::1:0150
+bbbb-337		IN	AAAA	2001:db8::1:0151
+bbbb-338		IN	AAAA	2001:db8::1:0152
+bbbb-339		IN	AAAA	2001:db8::1:0153
+bbbb-340		IN	AAAA	2001:db8::1:0154
+bbbb-341		IN	AAAA	2001:db8::1:0155
+bbbb-342		IN	AAAA	2001:db8::1:0156
+bbbb-343		IN	AAAA	2001:db8::1:0157
+bbbb-344		IN	AAAA	2001:db8::1:0158
+bbbb-345		IN	AAAA	2001:db8::1:0159
+bbbb-346		IN	AAAA	2001:db8::1:015a
+bbbb-347		IN	AAAA	2001:db8::1:015b
+bbbb-348		IN	AAAA	2001:db8::1:015c
+bbbb-349		IN	AAAA	2001:db8::1:015d
+bbbb-350		IN	AAAA	2001:db8::1:015e
+bbbb-351		IN	AAAA	2001:db8::1:015f
+bbbb-352		IN	AAAA	2001:db8::1:0160
+bbbb-353		IN	AAAA	2001:db8::1:0161
+bbbb-354		IN	AAAA	2001:db8::1:0162
+bbbb-355		IN	AAAA	2001:db8::1:0163
+bbbb-356		IN	AAAA	2001:db8::1:0164
+bbbb-357		IN	AAAA	2001:db8::1:0165
+bbbb-358		IN	AAAA	2001:db8::1:0166
+bbbb-359		IN	AAAA	2001:db8::1:0167
+bbbb-360		IN	AAAA	2001:db8::1:0168
+bbbb-361		IN	AAAA	2001:db8::1:0169
+bbbb-362		IN	AAAA	2001:db8::1:016a
+bbbb-363		IN	AAAA	2001:db8::1:016b
+bbbb-364		IN	AAAA	2001:db8::1:016c
+bbbb-365		IN	AAAA	2001:db8::1:016d
+bbbb-366		IN	AAAA	2001:db8::1:016e
+bbbb-367		IN	AAAA	2001:db8::1:016f
+bbbb-368		IN	AAAA	2001:db8::1:0170
+bbbb-369		IN	AAAA	2001:db8::1:0171
+bbbb-370		IN	AAAA	2001:db8::1:0172
+bbbb-371		IN	AAAA	2001:db8::1:0173
+bbbb-372		IN	AAAA	2001:db8::1:0174
+bbbb-373		IN	AAAA	2001:db8::1:0175
+bbbb-374		IN	AAAA	2001:db8::1:0176
+bbbb-375		IN	AAAA	2001:db8::1:0177
+bbbb-376		IN	AAAA	2001:db8::1:0178
+bbbb-377		IN	AAAA	2001:db8::1:0179
+bbbb-378		IN	AAAA	2001:db8::1:017a
+bbbb-379		IN	AAAA	2001:db8::1:017b
+bbbb-380		IN	AAAA	2001:db8::1:017c
+bbbb-381		IN	AAAA	2001:db8::1:017d
+bbbb-382		IN	AAAA	2001:db8::1:017e
+bbbb-383		IN	AAAA	2001:db8::1:017f
+bbbb-384		IN	AAAA	2001:db8::1:0180
+bbbb-385		IN	AAAA	2001:db8::1:0181
+bbbb-386		IN	AAAA	2001:db8::1:0182
+bbbb-387		IN	AAAA	2001:db8::1:0183
+bbbb-388		IN	AAAA	2001:db8::1:0184
+bbbb-389		IN	AAAA	2001:db8::1:0185
+bbbb-390		IN	AAAA	2001:db8::1:0186
+bbbb-391		IN	AAAA	2001:db8::1:0187
+bbbb-392		IN	AAAA	2001:db8::1:0188
+bbbb-393		IN	AAAA	2001:db8::1:0189
+bbbb-394		IN	AAAA	2001:db8::1:018a
+bbbb-395		IN	AAAA	2001:db8::1:018b
+bbbb-396		IN	AAAA	2001:db8::1:018c
+bbbb-397		IN	AAAA	2001:db8::1:018d
+bbbb-398		IN	AAAA	2001:db8::1:018e
+bbbb-399		IN	AAAA	2001:db8::1:018f
+bbbb-400		IN	AAAA	2001:db8::1:0190
+bbbb-401		IN	AAAA	2001:db8::1:0191
+bbbb-402		IN	AAAA	2001:db8::1:0192
+bbbb-403		IN	AAAA	2001:db8::1:0193
+bbbb-404		IN	AAAA	2001:db8::1:0194
+bbbb-405		IN	AAAA	2001:db8::1:0195
+bbbb-406		IN	AAAA	2001:db8::1:0196
+bbbb-407		IN	AAAA	2001:db8::1:0197
+bbbb-408		IN	AAAA	2001:db8::1:0198
+bbbb-409		IN	AAAA	2001:db8::1:0199
+bbbb-410		IN	AAAA	2001:db8::1:019a
+bbbb-411		IN	AAAA	2001:db8::1:019b
+bbbb-412		IN	AAAA	2001:db8::1:019c
+bbbb-413		IN	AAAA	2001:db8::1:019d
+bbbb-414		IN	AAAA	2001:db8::1:019e
+bbbb-415		IN	AAAA	2001:db8::1:019f
+bbbb-416		IN	AAAA	2001:db8::1:01a0
+bbbb-417		IN	AAAA	2001:db8::1:01a1
+bbbb-418		IN	AAAA	2001:db8::1:01a2
+bbbb-419		IN	AAAA	2001:db8::1:01a3
+bbbb-420		IN	AAAA	2001:db8::1:01a4
+bbbb-421		IN	AAAA	2001:db8::1:01a5
+bbbb-422		IN	AAAA	2001:db8::1:01a6
+bbbb-423		IN	AAAA	2001:db8::1:01a7
+bbbb-424		IN	AAAA	2001:db8::1:01a8
+bbbb-425		IN	AAAA	2001:db8::1:01a9
+bbbb-426		IN	AAAA	2001:db8::1:01aa
+bbbb-427		IN	AAAA	2001:db8::1:01ab
+bbbb-428		IN	AAAA	2001:db8::1:01ac
+bbbb-429		IN	AAAA	2001:db8::1:01ad
+bbbb-430		IN	AAAA	2001:db8::1:01ae
+bbbb-431		IN	AAAA	2001:db8::1:01af
+bbbb-432		IN	AAAA	2001:db8::1:01b0
+bbbb-433		IN	AAAA	2001:db8::1:01b1
+bbbb-434		IN	AAAA	2001:db8::1:01b2
+bbbb-435		IN	AAAA	2001:db8::1:01b3
+bbbb-436		IN	AAAA	2001:db8::1:01b4
+bbbb-437		IN	AAAA	2001:db8::1:01b5
+bbbb-438		IN	AAAA	2001:db8::1:01b6
+bbbb-439		IN	AAAA	2001:db8::1:01b7
+bbbb-440		IN	AAAA	2001:db8::1:01b8
+bbbb-441		IN	AAAA	2001:db8::1:01b9
+bbbb-442		IN	AAAA	2001:db8::1:01ba
+bbbb-443		IN	AAAA	2001:db8::1:01bb
+bbbb-444		IN	AAAA	2001:db8::1:01bc
+bbbb-445		IN	AAAA	2001:db8::1:01bd
+bbbb-446		IN	AAAA	2001:db8::1:01be
+bbbb-447		IN	AAAA	2001:db8::1:01bf
+bbbb-448		IN	AAAA	2001:db8::1:01c0
+bbbb-449		IN	AAAA	2001:db8::1:01c1
+bbbb-450		IN	AAAA	2001:db8::1:01c2
+bbbb-451		IN	AAAA	2001:db8::1:01c3
+bbbb-452		IN	AAAA	2001:db8::1:01c4
+bbbb-453		IN	AAAA	2001:db8::1:01c5
+bbbb-454		IN	AAAA	2001:db8::1:01c6
+bbbb-455		IN	AAAA	2001:db8::1:01c7
+bbbb-456		IN	AAAA	2001:db8::1:01c8
+bbbb-457		IN	AAAA	2001:db8::1:01c9
+bbbb-458		IN	AAAA	2001:db8::1:01ca
+bbbb-459		IN	AAAA	2001:db8::1:01cb
+bbbb-460		IN	AAAA	2001:db8::1:01cc
+bbbb-461		IN	AAAA	2001:db8::1:01cd
+bbbb-462		IN	AAAA	2001:db8::1:01ce
+bbbb-463		IN	AAAA	2001:db8::1:01cf
+bbbb-464		IN	AAAA	2001:db8::1:01d0
+bbbb-465		IN	AAAA	2001:db8::1:01d1
+bbbb-466		IN	AAAA	2001:db8::1:01d2
+bbbb-467		IN	AAAA	2001:db8::1:01d3
+bbbb-468		IN	AAAA	2001:db8::1:01d4
+bbbb-469		IN	AAAA	2001:db8::1:01d5
+bbbb-470		IN	AAAA	2001:db8::1:01d6
+bbbb-471		IN	AAAA	2001:db8::1:01d7
+bbbb-472		IN	AAAA	2001:db8::1:01d8
+bbbb-473		IN	AAAA	2001:db8::1:01d9
+bbbb-474		IN	AAAA	2001:db8::1:01da
+bbbb-475		IN	AAAA	2001:db8::1:01db
+bbbb-476		IN	AAAA	2001:db8::1:01dc
+bbbb-477		IN	AAAA	2001:db8::1:01dd
+bbbb-478		IN	AAAA	2001:db8::1:01de
+bbbb-479		IN	AAAA	2001:db8::1:01df
+bbbb-480		IN	AAAA	2001:db8::1:01e0
+bbbb-481		IN	AAAA	2001:db8::1:01e1
+bbbb-482		IN	AAAA	2001:db8::1:01e2
+bbbb-483		IN	AAAA	2001:db8::1:01e3
+bbbb-484		IN	AAAA	2001:db8::1:01e4
+bbbb-485		IN	AAAA	2001:db8::1:01e5
+bbbb-486		IN	AAAA	2001:db8::1:01e6
+bbbb-487		IN	AAAA	2001:db8::1:01e7
+bbbb-488		IN	AAAA	2001:db8::1:01e8
+bbbb-489		IN	AAAA	2001:db8::1:01e9
+bbbb-490		IN	AAAA	2001:db8::1:01ea
+bbbb-491		IN	AAAA	2001:db8::1:01eb
+bbbb-492		IN	AAAA	2001:db8::1:01ec
+bbbb-493		IN	AAAA	2001:db8::1:01ed
+bbbb-494		IN	AAAA	2001:db8::1:01ee
+bbbb-495		IN	AAAA	2001:db8::1:01ef
+bbbb-496		IN	AAAA	2001:db8::1:01f0
+bbbb-497		IN	AAAA	2001:db8::1:01f1
+bbbb-498		IN	AAAA	2001:db8::1:01f2
+bbbb-499		IN	AAAA	2001:db8::1:01f3
+bbbb-500		IN	AAAA	2001:db8::1:01f4
+bbbb-501		IN	AAAA	2001:db8::1:01f5
+bbbb-502		IN	AAAA	2001:db8::1:01f6
+bbbb-503		IN	AAAA	2001:db8::1:01f7
+bbbb-504		IN	AAAA	2001:db8::1:01f8
+bbbb-505		IN	AAAA	2001:db8::1:01f9
+bbbb-506		IN	AAAA	2001:db8::1:01fa
+bbbb-507		IN	AAAA	2001:db8::1:01fb
+bbbb-508		IN	AAAA	2001:db8::1:01fc
+bbbb-509		IN	AAAA	2001:db8::1:01fd
+bbbb-510		IN	AAAA	2001:db8::1:01fe
+bbbb-511		IN	AAAA	2001:db8::1:01ff
+
+cccc-000		IN	AAAA	2001:db8::2:0000
+cccc-001		IN	AAAA	2001:db8::2:0001
+cccc-002		IN	AAAA	2001:db8::2:0002
+cccc-003		IN	AAAA	2001:db8::2:0003
+cccc-004		IN	AAAA	2001:db8::2:0004
+cccc-005		IN	AAAA	2001:db8::2:0005
+cccc-006		IN	AAAA	2001:db8::2:0006
+cccc-007		IN	AAAA	2001:db8::2:0007
+cccc-008		IN	AAAA	2001:db8::2:0008
+cccc-009		IN	AAAA	2001:db8::2:0009
+cccc-010		IN	AAAA	2001:db8::2:000a
+cccc-011		IN	AAAA	2001:db8::2:000b
+cccc-012		IN	AAAA	2001:db8::2:000c
+cccc-013		IN	AAAA	2001:db8::2:000d
+cccc-014		IN	AAAA	2001:db8::2:000e
+cccc-015		IN	AAAA	2001:db8::2:000f
+cccc-016		IN	AAAA	2001:db8::2:0010
+cccc-017		IN	AAAA	2001:db8::2:0011
+cccc-018		IN	AAAA	2001:db8::2:0012
+cccc-019		IN	AAAA	2001:db8::2:0013
+cccc-020		IN	AAAA	2001:db8::2:0014
+cccc-021		IN	AAAA	2001:db8::2:0015
+cccc-022		IN	AAAA	2001:db8::2:0016
+cccc-023		IN	AAAA	2001:db8::2:0017
+cccc-024		IN	AAAA	2001:db8::2:0018
+cccc-025		IN	AAAA	2001:db8::2:0019
+cccc-026		IN	AAAA	2001:db8::2:001a
+cccc-027		IN	AAAA	2001:db8::2:001b
+cccc-028		IN	AAAA	2001:db8::2:001c
+cccc-029		IN	AAAA	2001:db8::2:001d
+cccc-030		IN	AAAA	2001:db8::2:001e
+cccc-031		IN	AAAA	2001:db8::2:001f
+cccc-032		IN	AAAA	2001:db8::2:0020
+cccc-033		IN	AAAA	2001:db8::2:0021
+cccc-034		IN	AAAA	2001:db8::2:0022
+cccc-035		IN	AAAA	2001:db8::2:0023
+cccc-036		IN	AAAA	2001:db8::2:0024
+cccc-037		IN	AAAA	2001:db8::2:0025
+cccc-038		IN	AAAA	2001:db8::2:0026
+cccc-039		IN	AAAA	2001:db8::2:0027
+cccc-040		IN	AAAA	2001:db8::2:0028
+cccc-041		IN	AAAA	2001:db8::2:0029
+cccc-042		IN	AAAA	2001:db8::2:002a
+cccc-043		IN	AAAA	2001:db8::2:002b
+cccc-044		IN	AAAA	2001:db8::2:002c
+cccc-045		IN	AAAA	2001:db8::2:002d
+cccc-046		IN	AAAA	2001:db8::2:002e
+cccc-047		IN	AAAA	2001:db8::2:002f
+cccc-048		IN	AAAA	2001:db8::2:0030
+cccc-049		IN	AAAA	2001:db8::2:0031
+cccc-050		IN	AAAA	2001:db8::2:0032
+cccc-051		IN	AAAA	2001:db8::2:0033
+cccc-052		IN	AAAA	2001:db8::2:0034
+cccc-053		IN	AAAA	2001:db8::2:0035
+cccc-054		IN	AAAA	2001:db8::2:0036
+cccc-055		IN	AAAA	2001:db8::2:0037
+cccc-056		IN	AAAA	2001:db8::2:0038
+cccc-057		IN	AAAA	2001:db8::2:0039
+cccc-058		IN	AAAA	2001:db8::2:003a
+cccc-059		IN	AAAA	2001:db8::2:003b
+cccc-060		IN	AAAA	2001:db8::2:003c
+cccc-061		IN	AAAA	2001:db8::2:003d
+cccc-062		IN	AAAA	2001:db8::2:003e
+cccc-063		IN	AAAA	2001:db8::2:003f
+cccc-064		IN	AAAA	2001:db8::2:0040
+cccc-065		IN	AAAA	2001:db8::2:0041
+cccc-066		IN	AAAA	2001:db8::2:0042
+cccc-067		IN	AAAA	2001:db8::2:0043
+cccc-068		IN	AAAA	2001:db8::2:0044
+cccc-069		IN	AAAA	2001:db8::2:0045
+cccc-070		IN	AAAA	2001:db8::2:0046
+cccc-071		IN	AAAA	2001:db8::2:0047
+cccc-072		IN	AAAA	2001:db8::2:0048
+cccc-073		IN	AAAA	2001:db8::2:0049
+cccc-074		IN	AAAA	2001:db8::2:004a
+cccc-075		IN	AAAA	2001:db8::2:004b
+cccc-076		IN	AAAA	2001:db8::2:004c
+cccc-077		IN	AAAA	2001:db8::2:004d
+cccc-078		IN	AAAA	2001:db8::2:004e
+cccc-079		IN	AAAA	2001:db8::2:004f
+cccc-080		IN	AAAA	2001:db8::2:0050
+cccc-081		IN	AAAA	2001:db8::2:0051
+cccc-082		IN	AAAA	2001:db8::2:0052
+cccc-083		IN	AAAA	2001:db8::2:0053
+cccc-084		IN	AAAA	2001:db8::2:0054
+cccc-085		IN	AAAA	2001:db8::2:0055
+cccc-086		IN	AAAA	2001:db8::2:0056
+cccc-087		IN	AAAA	2001:db8::2:0057
+cccc-088		IN	AAAA	2001:db8::2:0058
+cccc-089		IN	AAAA	2001:db8::2:0059
+cccc-090		IN	AAAA	2001:db8::2:005a
+cccc-091		IN	AAAA	2001:db8::2:005b
+cccc-092		IN	AAAA	2001:db8::2:005c
+cccc-093		IN	AAAA	2001:db8::2:005d
+cccc-094		IN	AAAA	2001:db8::2:005e
+cccc-095		IN	AAAA	2001:db8::2:005f
+cccc-096		IN	AAAA	2001:db8::2:0060
+cccc-097		IN	AAAA	2001:db8::2:0061
+cccc-098		IN	AAAA	2001:db8::2:0062
+cccc-099		IN	AAAA	2001:db8::2:0063
+cccc-100		IN	AAAA	2001:db8::2:0064
+cccc-101		IN	AAAA	2001:db8::2:0065
+cccc-102		IN	AAAA	2001:db8::2:0066
+cccc-103		IN	AAAA	2001:db8::2:0067
+cccc-104		IN	AAAA	2001:db8::2:0068
+cccc-105		IN	AAAA	2001:db8::2:0069
+cccc-106		IN	AAAA	2001:db8::2:006a
+cccc-107		IN	AAAA	2001:db8::2:006b
+cccc-108		IN	AAAA	2001:db8::2:006c
+cccc-109		IN	AAAA	2001:db8::2:006d
+cccc-110		IN	AAAA	2001:db8::2:006e
+cccc-111		IN	AAAA	2001:db8::2:006f
+cccc-112		IN	AAAA	2001:db8::2:0070
+cccc-113		IN	AAAA	2001:db8::2:0071
+cccc-114		IN	AAAA	2001:db8::2:0072
+cccc-115		IN	AAAA	2001:db8::2:0073
+cccc-116		IN	AAAA	2001:db8::2:0074
+cccc-117		IN	AAAA	2001:db8::2:0075
+cccc-118		IN	AAAA	2001:db8::2:0076
+cccc-119		IN	AAAA	2001:db8::2:0077
+cccc-120		IN	AAAA	2001:db8::2:0078
+cccc-121		IN	AAAA	2001:db8::2:0079
+cccc-122		IN	AAAA	2001:db8::2:007a
+cccc-123		IN	AAAA	2001:db8::2:007b
+cccc-124		IN	AAAA	2001:db8::2:007c
+cccc-125		IN	AAAA	2001:db8::2:007d
+cccc-126		IN	AAAA	2001:db8::2:007e
+cccc-127		IN	AAAA	2001:db8::2:007f
+cccc-128		IN	AAAA	2001:db8::2:0080
+cccc-129		IN	AAAA	2001:db8::2:0081
+cccc-130		IN	AAAA	2001:db8::2:0082
+cccc-131		IN	AAAA	2001:db8::2:0083
+cccc-132		IN	AAAA	2001:db8::2:0084
+cccc-133		IN	AAAA	2001:db8::2:0085
+cccc-134		IN	AAAA	2001:db8::2:0086
+cccc-135		IN	AAAA	2001:db8::2:0087
+cccc-136		IN	AAAA	2001:db8::2:0088
+cccc-137		IN	AAAA	2001:db8::2:0089
+cccc-138		IN	AAAA	2001:db8::2:008a
+cccc-139		IN	AAAA	2001:db8::2:008b
+cccc-140		IN	AAAA	2001:db8::2:008c
+cccc-141		IN	AAAA	2001:db8::2:008d
+cccc-142		IN	AAAA	2001:db8::2:008e
+cccc-143		IN	AAAA	2001:db8::2:008f
+cccc-144		IN	AAAA	2001:db8::2:0090
+cccc-145		IN	AAAA	2001:db8::2:0091
+cccc-146		IN	AAAA	2001:db8::2:0092
+cccc-147		IN	AAAA	2001:db8::2:0093
+cccc-148		IN	AAAA	2001:db8::2:0094
+cccc-149		IN	AAAA	2001:db8::2:0095
+cccc-150		IN	AAAA	2001:db8::2:0096
+cccc-151		IN	AAAA	2001:db8::2:0097
+cccc-152		IN	AAAA	2001:db8::2:0098
+cccc-153		IN	AAAA	2001:db8::2:0099
+cccc-154		IN	AAAA	2001:db8::2:009a
+cccc-155		IN	AAAA	2001:db8::2:009b
+cccc-156		IN	AAAA	2001:db8::2:009c
+cccc-157		IN	AAAA	2001:db8::2:009d
+cccc-158		IN	AAAA	2001:db8::2:009e
+cccc-159		IN	AAAA	2001:db8::2:009f
+cccc-160		IN	AAAA	2001:db8::2:00a0
+cccc-161		IN	AAAA	2001:db8::2:00a1
+cccc-162		IN	AAAA	2001:db8::2:00a2
+cccc-163		IN	AAAA	2001:db8::2:00a3
+cccc-164		IN	AAAA	2001:db8::2:00a4
+cccc-165		IN	AAAA	2001:db8::2:00a5
+cccc-166		IN	AAAA	2001:db8::2:00a6
+cccc-167		IN	AAAA	2001:db8::2:00a7
+cccc-168		IN	AAAA	2001:db8::2:00a8
+cccc-169		IN	AAAA	2001:db8::2:00a9
+cccc-170		IN	AAAA	2001:db8::2:00aa
+cccc-171		IN	AAAA	2001:db8::2:00ab
+cccc-172		IN	AAAA	2001:db8::2:00ac
+cccc-173		IN	AAAA	2001:db8::2:00ad
+cccc-174		IN	AAAA	2001:db8::2:00ae
+cccc-175		IN	AAAA	2001:db8::2:00af
+cccc-176		IN	AAAA	2001:db8::2:00b0
+cccc-177		IN	AAAA	2001:db8::2:00b1
+cccc-178		IN	AAAA	2001:db8::2:00b2
+cccc-179		IN	AAAA	2001:db8::2:00b3
+cccc-180		IN	AAAA	2001:db8::2:00b4
+cccc-181		IN	AAAA	2001:db8::2:00b5
+cccc-182		IN	AAAA	2001:db8::2:00b6
+cccc-183		IN	AAAA	2001:db8::2:00b7
+cccc-184		IN	AAAA	2001:db8::2:00b8
+cccc-185		IN	AAAA	2001:db8::2:00b9
+cccc-186		IN	AAAA	2001:db8::2:00ba
+cccc-187		IN	AAAA	2001:db8::2:00bb
+cccc-188		IN	AAAA	2001:db8::2:00bc
+cccc-189		IN	AAAA	2001:db8::2:00bd
+cccc-190		IN	AAAA	2001:db8::2:00be
+cccc-191		IN	AAAA	2001:db8::2:00bf
+cccc-192		IN	AAAA	2001:db8::2:00c0
+cccc-193		IN	AAAA	2001:db8::2:00c1
+cccc-194		IN	AAAA	2001:db8::2:00c2
+cccc-195		IN	AAAA	2001:db8::2:00c3
+cccc-196		IN	AAAA	2001:db8::2:00c4
+cccc-197		IN	AAAA	2001:db8::2:00c5
+cccc-198		IN	AAAA	2001:db8::2:00c6
+cccc-199		IN	AAAA	2001:db8::2:00c7
+cccc-200		IN	AAAA	2001:db8::2:00c8
+cccc-201		IN	AAAA	2001:db8::2:00c9
+cccc-202		IN	AAAA	2001:db8::2:00ca
+cccc-203		IN	AAAA	2001:db8::2:00cb
+cccc-204		IN	AAAA	2001:db8::2:00cc
+cccc-205		IN	AAAA	2001:db8::2:00cd
+cccc-206		IN	AAAA	2001:db8::2:00ce
+cccc-207		IN	AAAA	2001:db8::2:00cf
+cccc-208		IN	AAAA	2001:db8::2:00d0
+cccc-209		IN	AAAA	2001:db8::2:00d1
+cccc-210		IN	AAAA	2001:db8::2:00d2
+cccc-211		IN	AAAA	2001:db8::2:00d3
+cccc-212		IN	AAAA	2001:db8::2:00d4
+cccc-213		IN	AAAA	2001:db8::2:00d5
+cccc-214		IN	AAAA	2001:db8::2:00d6
+cccc-215		IN	AAAA	2001:db8::2:00d7
+cccc-216		IN	AAAA	2001:db8::2:00d8
+cccc-217		IN	AAAA	2001:db8::2:00d9
+cccc-218		IN	AAAA	2001:db8::2:00da
+cccc-219		IN	AAAA	2001:db8::2:00db
+cccc-220		IN	AAAA	2001:db8::2:00dc
+cccc-221		IN	AAAA	2001:db8::2:00dd
+cccc-222		IN	AAAA	2001:db8::2:00de
+cccc-223		IN	AAAA	2001:db8::2:00df
+cccc-224		IN	AAAA	2001:db8::2:00e0
+cccc-225		IN	AAAA	2001:db8::2:00e1
+cccc-226		IN	AAAA	2001:db8::2:00e2
+cccc-227		IN	AAAA	2001:db8::2:00e3
+cccc-228		IN	AAAA	2001:db8::2:00e4
+cccc-229		IN	AAAA	2001:db8::2:00e5
+cccc-230		IN	AAAA	2001:db8::2:00e6
+cccc-231		IN	AAAA	2001:db8::2:00e7
+cccc-232		IN	AAAA	2001:db8::2:00e8
+cccc-233		IN	AAAA	2001:db8::2:00e9
+cccc-234		IN	AAAA	2001:db8::2:00ea
+cccc-235		IN	AAAA	2001:db8::2:00eb
+cccc-236		IN	AAAA	2001:db8::2:00ec
+cccc-237		IN	AAAA	2001:db8::2:00ed
+cccc-238		IN	AAAA	2001:db8::2:00ee
+cccc-239		IN	AAAA	2001:db8::2:00ef
+cccc-240		IN	AAAA	2001:db8::2:00f0
+cccc-241		IN	AAAA	2001:db8::2:00f1
+cccc-242		IN	AAAA	2001:db8::2:00f2
+cccc-243		IN	AAAA	2001:db8::2:00f3
+cccc-244		IN	AAAA	2001:db8::2:00f4
+cccc-245		IN	AAAA	2001:db8::2:00f5
+cccc-246		IN	AAAA	2001:db8::2:00f6
+cccc-247		IN	AAAA	2001:db8::2:00f7
+cccc-248		IN	AAAA	2001:db8::2:00f8
+cccc-249		IN	AAAA	2001:db8::2:00f9
+cccc-250		IN	AAAA	2001:db8::2:00fa
+cccc-251		IN	AAAA	2001:db8::2:00fb
+cccc-252		IN	AAAA	2001:db8::2:00fc
+cccc-253		IN	AAAA	2001:db8::2:00fd
+cccc-254		IN	AAAA	2001:db8::2:00fe
+cccc-255		IN	AAAA	2001:db8::2:00ff
+cccc-256		IN	AAAA	2001:db8::2:0100
+cccc-257		IN	AAAA	2001:db8::2:0101
+cccc-258		IN	AAAA	2001:db8::2:0102
+cccc-259		IN	AAAA	2001:db8::2:0103
+cccc-260		IN	AAAA	2001:db8::2:0104
+cccc-261		IN	AAAA	2001:db8::2:0105
+cccc-262		IN	AAAA	2001:db8::2:0106
+cccc-263		IN	AAAA	2001:db8::2:0107
+cccc-264		IN	AAAA	2001:db8::2:0108
+cccc-265		IN	AAAA	2001:db8::2:0109
+cccc-266		IN	AAAA	2001:db8::2:010a
+cccc-267		IN	AAAA	2001:db8::2:010b
+cccc-268		IN	AAAA	2001:db8::2:010c
+cccc-269		IN	AAAA	2001:db8::2:010d
+cccc-270		IN	AAAA	2001:db8::2:010e
+cccc-271		IN	AAAA	2001:db8::2:010f
+cccc-272		IN	AAAA	2001:db8::2:0110
+cccc-273		IN	AAAA	2001:db8::2:0111
+cccc-274		IN	AAAA	2001:db8::2:0112
+cccc-275		IN	AAAA	2001:db8::2:0113
+cccc-276		IN	AAAA	2001:db8::2:0114
+cccc-277		IN	AAAA	2001:db8::2:0115
+cccc-278		IN	AAAA	2001:db8::2:0116
+cccc-279		IN	AAAA	2001:db8::2:0117
+cccc-280		IN	AAAA	2001:db8::2:0118
+cccc-281		IN	AAAA	2001:db8::2:0119
+cccc-282		IN	AAAA	2001:db8::2:011a
+cccc-283		IN	AAAA	2001:db8::2:011b
+cccc-284		IN	AAAA	2001:db8::2:011c
+cccc-285		IN	AAAA	2001:db8::2:011d
+cccc-286		IN	AAAA	2001:db8::2:011e
+cccc-287		IN	AAAA	2001:db8::2:011f
+cccc-288		IN	AAAA	2001:db8::2:0120
+cccc-289		IN	AAAA	2001:db8::2:0121
+cccc-290		IN	AAAA	2001:db8::2:0122
+cccc-291		IN	AAAA	2001:db8::2:0123
+cccc-292		IN	AAAA	2001:db8::2:0124
+cccc-293		IN	AAAA	2001:db8::2:0125
+cccc-294		IN	AAAA	2001:db8::2:0126
+cccc-295		IN	AAAA	2001:db8::2:0127
+cccc-296		IN	AAAA	2001:db8::2:0128
+cccc-297		IN	AAAA	2001:db8::2:0129
+cccc-298		IN	AAAA	2001:db8::2:012a
+cccc-299		IN	AAAA	2001:db8::2:012b
+cccc-300		IN	AAAA	2001:db8::2:012c
+cccc-301		IN	AAAA	2001:db8::2:012d
+cccc-302		IN	AAAA	2001:db8::2:012e
+cccc-303		IN	AAAA	2001:db8::2:012f
+cccc-304		IN	AAAA	2001:db8::2:0130
+cccc-305		IN	AAAA	2001:db8::2:0131
+cccc-306		IN	AAAA	2001:db8::2:0132
+cccc-307		IN	AAAA	2001:db8::2:0133
+cccc-308		IN	AAAA	2001:db8::2:0134
+cccc-309		IN	AAAA	2001:db8::2:0135
+cccc-310		IN	AAAA	2001:db8::2:0136
+cccc-311		IN	AAAA	2001:db8::2:0137
+cccc-312		IN	AAAA	2001:db8::2:0138
+cccc-313		IN	AAAA	2001:db8::2:0139
+cccc-314		IN	AAAA	2001:db8::2:013a
+cccc-315		IN	AAAA	2001:db8::2:013b
+cccc-316		IN	AAAA	2001:db8::2:013c
+cccc-317		IN	AAAA	2001:db8::2:013d
+cccc-318		IN	AAAA	2001:db8::2:013e
+cccc-319		IN	AAAA	2001:db8::2:013f
+cccc-320		IN	AAAA	2001:db8::2:0140
+cccc-321		IN	AAAA	2001:db8::2:0141
+cccc-322		IN	AAAA	2001:db8::2:0142
+cccc-323		IN	AAAA	2001:db8::2:0143
+cccc-324		IN	AAAA	2001:db8::2:0144
+cccc-325		IN	AAAA	2001:db8::2:0145
+cccc-326		IN	AAAA	2001:db8::2:0146
+cccc-327		IN	AAAA	2001:db8::2:0147
+cccc-328		IN	AAAA	2001:db8::2:0148
+cccc-329		IN	AAAA	2001:db8::2:0149
+cccc-330		IN	AAAA	2001:db8::2:014a
+cccc-331		IN	AAAA	2001:db8::2:014b
+cccc-332		IN	AAAA	2001:db8::2:014c
+cccc-333		IN	AAAA	2001:db8::2:014d
+cccc-334		IN	AAAA	2001:db8::2:014e
+cccc-335		IN	AAAA	2001:db8::2:014f
+cccc-336		IN	AAAA	2001:db8::2:0150
+cccc-337		IN	AAAA	2001:db8::2:0151
+cccc-338		IN	AAAA	2001:db8::2:0152
+cccc-339		IN	AAAA	2001:db8::2:0153
+cccc-340		IN	AAAA	2001:db8::2:0154
+cccc-341		IN	AAAA	2001:db8::2:0155
+cccc-342		IN	AAAA	2001:db8::2:0156
+cccc-343		IN	AAAA	2001:db8::2:0157
+cccc-344		IN	AAAA	2001:db8::2:0158
+cccc-345		IN	AAAA	2001:db8::2:0159
+cccc-346		IN	AAAA	2001:db8::2:015a
+cccc-347		IN	AAAA	2001:db8::2:015b
+cccc-348		IN	AAAA	2001:db8::2:015c
+cccc-349		IN	AAAA	2001:db8::2:015d
+cccc-350		IN	AAAA	2001:db8::2:015e
+cccc-351		IN	AAAA	2001:db8::2:015f
+cccc-352		IN	AAAA	2001:db8::2:0160
+cccc-353		IN	AAAA	2001:db8::2:0161
+cccc-354		IN	AAAA	2001:db8::2:0162
+cccc-355		IN	AAAA	2001:db8::2:0163
+cccc-356		IN	AAAA	2001:db8::2:0164
+cccc-357		IN	AAAA	2001:db8::2:0165
+cccc-358		IN	AAAA	2001:db8::2:0166
+cccc-359		IN	AAAA	2001:db8::2:0167
+cccc-360		IN	AAAA	2001:db8::2:0168
+cccc-361		IN	AAAA	2001:db8::2:0169
+cccc-362		IN	AAAA	2001:db8::2:016a
+cccc-363		IN	AAAA	2001:db8::2:016b
+cccc-364		IN	AAAA	2001:db8::2:016c
+cccc-365		IN	AAAA	2001:db8::2:016d
+cccc-366		IN	AAAA	2001:db8::2:016e
+cccc-367		IN	AAAA	2001:db8::2:016f
+cccc-368		IN	AAAA	2001:db8::2:0170
+cccc-369		IN	AAAA	2001:db8::2:0171
+cccc-370		IN	AAAA	2001:db8::2:0172
+cccc-371		IN	AAAA	2001:db8::2:0173
+cccc-372		IN	AAAA	2001:db8::2:0174
+cccc-373		IN	AAAA	2001:db8::2:0175
+cccc-374		IN	AAAA	2001:db8::2:0176
+cccc-375		IN	AAAA	2001:db8::2:0177
+cccc-376		IN	AAAA	2001:db8::2:0178
+cccc-377		IN	AAAA	2001:db8::2:0179
+cccc-378		IN	AAAA	2001:db8::2:017a
+cccc-379		IN	AAAA	2001:db8::2:017b
+cccc-380		IN	AAAA	2001:db8::2:017c
+cccc-381		IN	AAAA	2001:db8::2:017d
+cccc-382		IN	AAAA	2001:db8::2:017e
+cccc-383		IN	AAAA	2001:db8::2:017f
+cccc-384		IN	AAAA	2001:db8::2:0180
+cccc-385		IN	AAAA	2001:db8::2:0181
+cccc-386		IN	AAAA	2001:db8::2:0182
+cccc-387		IN	AAAA	2001:db8::2:0183
+cccc-388		IN	AAAA	2001:db8::2:0184
+cccc-389		IN	AAAA	2001:db8::2:0185
+cccc-390		IN	AAAA	2001:db8::2:0186
+cccc-391		IN	AAAA	2001:db8::2:0187
+cccc-392		IN	AAAA	2001:db8::2:0188
+cccc-393		IN	AAAA	2001:db8::2:0189
+cccc-394		IN	AAAA	2001:db8::2:018a
+cccc-395		IN	AAAA	2001:db8::2:018b
+cccc-396		IN	AAAA	2001:db8::2:018c
+cccc-397		IN	AAAA	2001:db8::2:018d
+cccc-398		IN	AAAA	2001:db8::2:018e
+cccc-399		IN	AAAA	2001:db8::2:018f
+cccc-400		IN	AAAA	2001:db8::2:0190
+cccc-401		IN	AAAA	2001:db8::2:0191
+cccc-402		IN	AAAA	2001:db8::2:0192
+cccc-403		IN	AAAA	2001:db8::2:0193
+cccc-404		IN	AAAA	2001:db8::2:0194
+cccc-405		IN	AAAA	2001:db8::2:0195
+cccc-406		IN	AAAA	2001:db8::2:0196
+cccc-407		IN	AAAA	2001:db8::2:0197
+cccc-408		IN	AAAA	2001:db8::2:0198
+cccc-409		IN	AAAA	2001:db8::2:0199
+cccc-410		IN	AAAA	2001:db8::2:019a
+cccc-411		IN	AAAA	2001:db8::2:019b
+cccc-412		IN	AAAA	2001:db8::2:019c
+cccc-413		IN	AAAA	2001:db8::2:019d
+cccc-414		IN	AAAA	2001:db8::2:019e
+cccc-415		IN	AAAA	2001:db8::2:019f
+cccc-416		IN	AAAA	2001:db8::2:01a0
+cccc-417		IN	AAAA	2001:db8::2:01a1
+cccc-418		IN	AAAA	2001:db8::2:01a2
+cccc-419		IN	AAAA	2001:db8::2:01a3
+cccc-420		IN	AAAA	2001:db8::2:01a4
+cccc-421		IN	AAAA	2001:db8::2:01a5
+cccc-422		IN	AAAA	2001:db8::2:01a6
+cccc-423		IN	AAAA	2001:db8::2:01a7
+cccc-424		IN	AAAA	2001:db8::2:01a8
+cccc-425		IN	AAAA	2001:db8::2:01a9
+cccc-426		IN	AAAA	2001:db8::2:01aa
+cccc-427		IN	AAAA	2001:db8::2:01ab
+cccc-428		IN	AAAA	2001:db8::2:01ac
+cccc-429		IN	AAAA	2001:db8::2:01ad
+cccc-430		IN	AAAA	2001:db8::2:01ae
+cccc-431		IN	AAAA	2001:db8::2:01af
+cccc-432		IN	AAAA	2001:db8::2:01b0
+cccc-433		IN	AAAA	2001:db8::2:01b1
+cccc-434		IN	AAAA	2001:db8::2:01b2
+cccc-435		IN	AAAA	2001:db8::2:01b3
+cccc-436		IN	AAAA	2001:db8::2:01b4
+cccc-437		IN	AAAA	2001:db8::2:01b5
+cccc-438		IN	AAAA	2001:db8::2:01b6
+cccc-439		IN	AAAA	2001:db8::2:01b7
+cccc-440		IN	AAAA	2001:db8::2:01b8
+cccc-441		IN	AAAA	2001:db8::2:01b9
+cccc-442		IN	AAAA	2001:db8::2:01ba
+cccc-443		IN	AAAA	2001:db8::2:01bb
+cccc-444		IN	AAAA	2001:db8::2:01bc
+cccc-445		IN	AAAA	2001:db8::2:01bd
+cccc-446		IN	AAAA	2001:db8::2:01be
+cccc-447		IN	AAAA	2001:db8::2:01bf
+cccc-448		IN	AAAA	2001:db8::2:01c0
+cccc-449		IN	AAAA	2001:db8::2:01c1
+cccc-450		IN	AAAA	2001:db8::2:01c2
+cccc-451		IN	AAAA	2001:db8::2:01c3
+cccc-452		IN	AAAA	2001:db8::2:01c4
+cccc-453		IN	AAAA	2001:db8::2:01c5
+cccc-454		IN	AAAA	2001:db8::2:01c6
+cccc-455		IN	AAAA	2001:db8::2:01c7
+cccc-456		IN	AAAA	2001:db8::2:01c8
+cccc-457		IN	AAAA	2001:db8::2:01c9
+cccc-458		IN	AAAA	2001:db8::2:01ca
+cccc-459		IN	AAAA	2001:db8::2:01cb
+cccc-460		IN	AAAA	2001:db8::2:01cc
+cccc-461		IN	AAAA	2001:db8::2:01cd
+cccc-462		IN	AAAA	2001:db8::2:01ce
+cccc-463		IN	AAAA	2001:db8::2:01cf
+cccc-464		IN	AAAA	2001:db8::2:01d0
+cccc-465		IN	AAAA	2001:db8::2:01d1
+cccc-466		IN	AAAA	2001:db8::2:01d2
+cccc-467		IN	AAAA	2001:db8::2:01d3
+cccc-468		IN	AAAA	2001:db8::2:01d4
+cccc-469		IN	AAAA	2001:db8::2:01d5
+cccc-470		IN	AAAA	2001:db8::2:01d6
+cccc-471		IN	AAAA	2001:db8::2:01d7
+cccc-472		IN	AAAA	2001:db8::2:01d8
+cccc-473		IN	AAAA	2001:db8::2:01d9
+cccc-474		IN	AAAA	2001:db8::2:01da
+cccc-475		IN	AAAA	2001:db8::2:01db
+cccc-476		IN	AAAA	2001:db8::2:01dc
+cccc-477		IN	AAAA	2001:db8::2:01dd
+cccc-478		IN	AAAA	2001:db8::2:01de
+cccc-479		IN	AAAA	2001:db8::2:01df
+cccc-480		IN	AAAA	2001:db8::2:01e0
+cccc-481		IN	AAAA	2001:db8::2:01e1
+cccc-482		IN	AAAA	2001:db8::2:01e2
+cccc-483		IN	AAAA	2001:db8::2:01e3
+cccc-484		IN	AAAA	2001:db8::2:01e4
+cccc-485		IN	AAAA	2001:db8::2:01e5
+cccc-486		IN	AAAA	2001:db8::2:01e6
+cccc-487		IN	AAAA	2001:db8::2:01e7
+cccc-488		IN	AAAA	2001:db8::2:01e8
+cccc-489		IN	AAAA	2001:db8::2:01e9
+cccc-490		IN	AAAA	2001:db8::2:01ea
+cccc-491		IN	AAAA	2001:db8::2:01eb
+cccc-492		IN	AAAA	2001:db8::2:01ec
+cccc-493		IN	AAAA	2001:db8::2:01ed
+cccc-494		IN	AAAA	2001:db8::2:01ee
+cccc-495		IN	AAAA	2001:db8::2:01ef
+cccc-496		IN	AAAA	2001:db8::2:01f0
+cccc-497		IN	AAAA	2001:db8::2:01f1
+cccc-498		IN	AAAA	2001:db8::2:01f2
+cccc-499		IN	AAAA	2001:db8::2:01f3
+cccc-500		IN	AAAA	2001:db8::2:01f4
+cccc-501		IN	AAAA	2001:db8::2:01f5
+cccc-502		IN	AAAA	2001:db8::2:01f6
+cccc-503		IN	AAAA	2001:db8::2:01f7
+cccc-504		IN	AAAA	2001:db8::2:01f8
+cccc-505		IN	AAAA	2001:db8::2:01f9
+cccc-506		IN	AAAA	2001:db8::2:01fa
+cccc-507		IN	AAAA	2001:db8::2:01fb
+cccc-508		IN	AAAA	2001:db8::2:01fc
+cccc-509		IN	AAAA	2001:db8::2:01fd
+cccc-510		IN	AAAA	2001:db8::2:01fe
+cccc-511		IN	AAAA	2001:db8::2:01ff
diff --git a/tests/system/ixfr/db.example.n0.in b/tests/system/ixfr/db.example.n0.in
new file mode 100644
index 0000000..92fa0b0
--- /dev/null
+++ b/tests/system/ixfr/db.example.n0.in
@@ -0,0 +1,29 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@       IN	SOA	ns1.example. hostmaster.example. 100 3600 900 7200 300
+
+        IN	NS	ns1.example.
+        IN	NS	ns2.example.
+
+ns1     IN  A   192.0.2.1
+ns2     IN  A   192.0.2.2
+
+a-1     IN  A   192.0.2.101
+b-1     IN  A   192.0.2.201
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n2.in b/tests/system/ixfr/db.example.n2.in
new file mode 100644
index 0000000..6a999af
--- /dev/null
+++ b/tests/system/ixfr/db.example.n2.in
@@ -0,0 +1,28 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@       IN	SOA	ns1.example. hostmaster.example. 98 3600 900 7200 300
+
+        IN	NS	ns1.example.
+        IN	NS	ns2.example.
+
+ns1     IN  A   192.0.2.1
+ns2     IN  A   192.0.2.2
+
+a-1     IN  A   192.0.2.101
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n2.refresh.in b/tests/system/ixfr/db.example.n2.refresh.in
new file mode 100644
index 0000000..2c59416
--- /dev/null
+++ b/tests/system/ixfr/db.example.n2.refresh.in
@@ -0,0 +1,28 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@       IN	SOA	ns1.example. hostmaster.example. 98 30 2 7200 300
+
+        IN	NS	ns1.example.
+        IN	NS	ns2.example.
+
+ns1     IN  A   192.0.2.1
+ns2     IN  A   192.0.2.2
+
+a-1     IN  A   192.0.2.101
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n4.in b/tests/system/ixfr/db.example.n4.in
new file mode 100644
index 0000000..ae15a54
--- /dev/null
+++ b/tests/system/ixfr/db.example.n4.in
@@ -0,0 +1,31 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@       IN	SOA	ns1.example. hostmaster.example. 96 3600 900 7200 300
+
+        IN	NS	ns1.example.
+        IN	NS	ns2.example.
+
+ns1     IN  A   192.0.2.1
+ns2     IN  A   192.0.2.2
+
+a-1     IN  A   192.0.2.101
+a-2     IN  A   192.0.2.102
+b-1     IN  A   192.0.2.201
+b-2     IN  A   192.0.2.202
+
+$INCLUDE @abs_top_builddir@/tests/system/ixfr/db.example.common
diff --git a/tests/system/ixfr/db.example.n6.in b/tests/system/ixfr/db.example.n6.in
new file mode 100644
index 0000000..33a82a0
--- /dev/null
+++ b/tests/system/ixfr/db.example.n6.in
@@ -0,0 +1,29 @@
+; Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+;
+; Permission to use, copy, modify, and/or distribute this software for any
+; purpose with or without fee is hereby granted, provided that the above
+; copyright notice and this permission notice appear in all copies.
+;
+; THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+; REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+; AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+; INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+; LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+; OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+; PERFORMANCE OF THIS SOFTWARE.
+
+$ORIGIN example.
+$TTL 3600
+
+@       IN	SOA	ns1.example. hostmaster.example. 94 3600 900 7200 300
+
+        IN	NS	ns1.example.
+        IN	NS	ns2.example.
+
+ns1     IN  A   192.0.2.1
+ns2     IN  A   192.0.2.2
+
+a-1     IN  A   192.0.2.101
+a-2     IN  A   192.0.2.102
+b-1     IN  A   192.0.2.201
+b-2     IN  A   192.0.2.202
diff --git a/tests/system/ixfr/in-1/clean.sh b/tests/system/ixfr/in-1/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-1/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-1/ns1/README b/tests/system/ixfr/in-1/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-1/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-1/nsx2/README b/tests/system/ixfr/in-1/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-1/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-1/setup.sh.in b/tests/system/ixfr/in-1/setup.sh.in
new file mode 100644
index 0000000..d4c3978
--- /dev/null
+++ b/tests/system/ixfr/in-1/setup.sh.in
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2011  Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2001, 2002  Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the n-4 version of the
+# zone.  The configuration file enables IXFR and disables notifies.
+cp -f $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n4 ns1/db.example
+
+# Set up the IXFR client - load the same version of the zone.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n4
diff --git a/tests/system/ixfr/in-1/tests.sh b/tests/system/ixfr/in-1/tests.sh
new file mode 100644
index 0000000..2f49ddf
--- /dev/null
+++ b/tests/system/ixfr/in-1/tests.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the first IXFR-IN test.  A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and two previous
+# versions, N-2 and N-4.  A BIND 10 nameserver (the "client") is loaded with
+# version N-4 of the zone.  A NOTIFY is sent to it, and it is expected that
+# it will send an IXFR to the server and update itself with the latest version
+# of the zone.  (The changes are such that the update should be in the form of
+# a single UDP packet.)
+#
+# The pre-requisites for this test are the same as for the common tests, so
+# we can execute that directly.
+
+. ../common_tests.sh
+status=$?
+
+# TODO: Check the BIND 10 log, looking for the IXFR messages that indicate that
+# it has initiated an IXFR and that it received the update within a single
+# packet.
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/ixfr/in-2/clean.sh b/tests/system/ixfr/in-2/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-2/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-2/ns1/README b/tests/system/ixfr/in-2/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-2/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-2/nsx2/README b/tests/system/ixfr/in-2/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-2/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-2/setup.sh.in b/tests/system/ixfr/in-2/setup.sh.in
new file mode 100644
index 0000000..a5f64e5
--- /dev/null
+++ b/tests/system/ixfr/in-2/setup.sh.in
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Clean up from last time
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the n-6 version of the
+# zone.  The configuration file enables IXFR and disables notifies.
+cp -f $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n6 ns1/db.example
+
+# Set up the IXFR client - load an earlier version of the zone
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n6
diff --git a/tests/system/ixfr/in-2/tests.sh b/tests/system/ixfr/in-2/tests.sh
new file mode 100644
index 0000000..7b1e2a8
--- /dev/null
+++ b/tests/system/ixfr/in-2/tests.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the second IXFR-IN test.  A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and three previous
+# versions, N-2, N-4 and N-6.  A BIND 10 nameserver (the "client") is loaded
+# with version N-6 of the zone.  A NOTIFY is sent to it, and it is expected that
+# it will send an IXFR to the server and update itself with the latest version
+# of the zone.  (The changes are such that the update will have to take place
+# over TCP.)
+
+. ../ixfr_init.sh
+
+# On entry, the IXFR server is at version N-6.  The common tests assume that
+# it is an N-4, so update it.
+echo "I:$SERVER_NAME updating IXFR-server to suitable start version"
+update_server_zone $SERVER_NAME $SERVER_IP $IXFR_TOP/db.example.n4
+if [ $? -ne 0 ];
+then
+    exit 1
+fi
+
+# The pre-requisites for this test are the same as for the common tests, so
+# we can execute that directly.
+. ../common_tests.sh
+if [ $? -ne 0 ];
+then
+    exit 1
+fi
+
+# TEMPORARY: at the time of writing (October 2011) BIND 10 does not attempt
+# a UDP transfer first.  Therefore just check for TCP transfer.
+
+# Check that the client initiated and completed an IXFR.  Use a simple grep as
+# the syntax and capabilities of egrep may vary between systems.
+grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR > /dev/null
+if [ $? -ne 0 ];
+then
+    echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
+    exit 1
+fi
+
+grep XFRIN_XFR_TRANSFER_SUCCESS nsx2/bind10.run | grep IXFR > /dev/null
+if [ $? -ne 0 ];
+then
+    echo "R:$CLIENT_NAME FAIL no 'IXFR successful' message in the BIND 10 log"
+    exit 1
+fi
+
+# Look in the named log file to see if a TCP IXFR was requested.  Again use a
+# simple grep.
+grep "transfer of" ns1/named.run | grep "sending TCP message" > /dev/null
+if [ $? -ne 0 ];
+then
+    echo "R:$SERVER_NAME FAIL no 'sending TCP' message in the BIND 9 log"
+    exit 1
+fi
+
+grep "IXFR ended" ns1/named.run > /dev/null
+if [ $? -ne 0 ];
+then
+    echo "R:$SERVER_NAME FAIL no 'IXFR ended' message in the BIND 9 log"
+    exit 1
+fi
+
+echo "I:exit status: 0"
+exit 0
diff --git a/tests/system/ixfr/in-3/clean.sh b/tests/system/ixfr/in-3/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-3/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-3/ns1/README b/tests/system/ixfr/in-3/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-3/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-3/nsx2/README b/tests/system/ixfr/in-3/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-3/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-3/setup.sh.in b/tests/system/ixfr/in-3/setup.sh.in
new file mode 100644
index 0000000..867e06e
--- /dev/null
+++ b/tests/system/ixfr/in-3/setup.sh.in
@@ -0,0 +1,29 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+# Clean up from last time
+
+sh clean.sh
+
+# Set up the initial version of the IXFR server - load the latest version of
+# the zone.
+cp -f $IXFR_TOP/named_noixfr.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n0 ns1/db.example
+
+# Set up the IXFR client - load a previous version of the zone.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2
diff --git a/tests/system/ixfr/in-3/tests.sh b/tests/system/ixfr/in-3/tests.sh
new file mode 100644
index 0000000..d47a221
--- /dev/null
+++ b/tests/system/ixfr/in-3/tests.sh
@@ -0,0 +1,66 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the third IXFR-IN test.  A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and has IXFRs disabled.
+# A BIND 10 nameserver (the "client") is loaded with version N-2 of the zone.
+# A NOTIFY is sent to it, and it is expected that it will send an IXFR to the
+# server; the server should not respond to the request, so the client should
+# then send an AXFR request and receive the latest copy of the zone.
+
+# TODO It seems bind9 still allows IXFR even when provide-ixfr on;
+
+. ../ixfr_init.sh
+status=$?
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:SOA serial of IXFR client $CLIENT_NAME is $old_client_serial"
+
+# If required, get the IXFR server to notify the IXFR client of the new zone.
+# Do this by allowing notifies and then triggering a re-notification of the
+# server.
+echo "I:notifying IXFR-client $CLIENT_NAME of presence of new version of zone"
+do_rndc $SERVER_NAME $SERVER_IP notify example
+status=`expr $status + $?`
+
+# Wait for the client to update itself.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+status=`expr $status + $?`
+
+# Has updated, get the latest serial of the client and server - they
+# should be the same.
+compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+status=`expr $status + $?`
+
+# Check the log there's the IXFR and fallback
+grep XFRIN_XFR_TRANSFER_STARTED nsx2/bind10.run | grep IXFR
+if [ $? -ne 0 ];
+then
+    echo "R:$CLIENT_NAME FAIL no 'IXFR started' message in the BIND 10 log"
+    exit 1
+fi
+
+grep XFRIN_XFR_TRANSFER_FALLBACK nsx2/bind10.run
+if [ $? -ne 0 ];
+then
+    echo "R:$CLIENT_NAME FAIL no fallback message in BIND10 log"
+    exit 1
+fi
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/ixfr/in-4/clean.sh b/tests/system/ixfr/in-4/clean.sh
new file mode 120000
index 0000000..099bebd
--- /dev/null
+++ b/tests/system/ixfr/in-4/clean.sh
@@ -0,0 +1 @@
+../clean_ns.sh
\ No newline at end of file
diff --git a/tests/system/ixfr/in-4/ns1/README b/tests/system/ixfr/in-4/ns1/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-4/ns1/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-4/nsx2/README b/tests/system/ixfr/in-4/nsx2/README
new file mode 100644
index 0000000..aaa8a31
--- /dev/null
+++ b/tests/system/ixfr/in-4/nsx2/README
@@ -0,0 +1,3 @@
+This directory should be empty. A README file is placed here to ensure git
+notes the directory's presence.  It can be removed if other files are placed
+here.
diff --git a/tests/system/ixfr/in-4/setup.sh.in b/tests/system/ixfr/in-4/setup.sh.in
new file mode 100644
index 0000000..7419e27
--- /dev/null
+++ b/tests/system/ixfr/in-4/setup.sh.in
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+. @abs_top_builddir@/tests/system/conf.sh
+# Clean up from last time
+
+sh clean.sh
+
+# Set up the initial version of the ixfr server - load the last-but-one version
+# of the zone.
+cp $IXFR_TOP/named_nonotify.conf ns1/named.conf
+cp -f $IXFR_TOP/db.example.n2.refresh ns1/db.example
+
+# Set up the IXFR client - load a previous version of the zone with a short
+# refresh time.
+cp -f $IXFR_TOP/b10-config.db nsx2/b10-config.db
+${B10_LOADZONE} -o . -d $IXFR_TOP/zone.sqlite3 $IXFR_TOP/db.example.n2.refresh
diff --git a/tests/system/ixfr/in-4/tests.sh b/tests/system/ixfr/in-4/tests.sh
new file mode 100644
index 0000000..3024253
--- /dev/null
+++ b/tests/system/ixfr/in-4/tests.sh
@@ -0,0 +1,53 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This script performs the fourth IXFR-IN test.  A BIND 9 nameserver (the
+# "server") contains a version of the zone (version N) and has IXFRs enabled.
+# A BIND 10 nameserver (the "client") is loaded with version N-2 of the zone
+# and a small refresh time.  After this expires, the IXFR client should send
+# an IXFR request to the IXFR server.
+
+. ../ixfr_init.sh
+status=$?
+
+# Ensure the server has the latest copy of the zone.  The implicit assumption
+# here is that starting the two systems and reloading the IXFR server takes
+# less time than the SOA refresh time set in the "db.example.n2.refresh" zone
+# file.
+cp $IXFR_TOP/db.example.n0 ns1/db.example
+do_rndc $SERVER_NAME $SERVER_IP reload
+
+# Store the SOA serial number of the BIND 10 client for later use.
+old_client_serial=`$DIG_SOA @$CLIENT_IP | $AWK '{print $3}'`
+echo "I:SOA serial of IXFR client $CLIENT_NAME is $old_client_serial"
+
+# Wait for the client to update itself. 30 seconds has been given as the
+# refresh interval and 2 seconds as the retry interval.  The wait_for_update
+# function will check for up to a minute looking for the new serial.
+wait_for_update $CLIENT_NAME $CLIENT_IP $old_client_serial
+status=`expr $status + $?`
+
+# Has updated, get the latest serial of the client and server - they
+# should be the same.
+compare_soa $SERVER_NAME $SERVER_IP $CLIENT_NAME $CLIENT_IP
+status=`expr $status + $?`
+
+# TODO: Check the BIND 10 log, looking for the IXFR messages that indicate that
+# the client has initiated the IXFR.
+
+echo "I:exit status: $status"
+exit $status
diff --git a/tests/system/ixfr/ixfr_init.sh.in b/tests/system/ixfr/ixfr_init.sh.in
new file mode 100644
index 0000000..ba6049e
--- /dev/null
+++ b/tests/system/ixfr/ixfr_init.sh.in
@@ -0,0 +1,330 @@
+#!/bin/sh
+#
+# Copyright (C) 2011  Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# \file
+# This file should be run by all IXFR tests before doing anything else.  It
+# includes the main configuration script to set the environment variables as
+# well as defining useful shell subroutines.
+
+. @abs_top_builddir@/tests/system/conf.sh
+
+# Useful symbols used in the IXFR tests.
+
+# Short-hand for getting SOA - just supply address of the server
+DIG_SOA="$DIG +norecurse +short -p $DNS_PORT example. SOA"
+
+# All IXFR tests use a BIND 9 server serving a BIND 10 client.  These have the
+# same name and use the same address in all tests.
+SERVER_NAME=ns1
+SERVER_IP=10.53.0.1   # BIND 9
+
+CLIENT_NAME=nsx2
+CLIENT_IP=10.53.0.2   # BIND 10
+
+# \brief Check Arguments
+#
+# Most functions take the name of a nameserver as the first argument and its IP
+# address as the second.  This function is passed "$*" and just checks that
+# both $1 and $2 are defined.
+#
+# \arg $* Arguments passed to caller
+#
+# \return status 0 => $1 and $2 are defined, 1 => they are not.
+check_name_ip() {
+
+    if [ "$1" = "" ];
+    then
+        echo "R:FAIL name of server not supplied"
+        return 1
+    fi
+
+    if [ "$2" = "" ];
+    then
+        echo "R:FAIL IP address of server not supplied"
+        return 1
+    fi
+
+    return 0
+}
+
+
+# \brief Perform RNDC Command
+#
+# Controls the BIND 9 IXFR server.  Called do_rndc (instead of rndc) to avoid
+# confusion if rndc itself is in the search path.
+#
+# \arg $1 - Name of the server (ns1, nsx2 etc.)
+# \arg $2 - IP address of the server
+# \arg $* - Command to execute (which may be multiple tokens)
+#
+# \return 0 on success, 1 on failure (in which case an error message will
+#         have been output).
+do_rndc () {
+
+    # If the following checks fail, the code is wrong.
+
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL do_rndc - name or ip address of server not supplied"
+        return 1
+    fi
+
+    name=$1
+    shift
+    ip=$1
+    shift
+
+    if [ "$1" = "" ];
+    then
+        echo "R:FAIL do_rndc - rndc command not supplied"
+        return 1
+    fi
+    
+    $RNDC -c $SYSTEM_TOP/common/rndc.conf -s $ip -p $RNDC_PORT $* 2>&1 \
+        | sed "s/^/I:$name /"
+}
+
+# \brief Wait for update
+#
+# Given a serial number and a server, poll the nameserver until the SOA serial
+# number is different from that given.  The poll takes place every five seconds
+# for a minute.
+#
+# \arg $1 - Name of the server
+# \arg $2 - IP address of the server
+# \arg $3 - Serial number to check against
+#
+# \return 0 if the serial number is different (requires another poll to obtain
+#         it), 1 if the serial number has not changed after one minute.
+wait_for_update() {
+
+    # If the following checks fail, the code is wrong.
+
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL wait_for_update - name or ip address of system not supplied"
+        return 1
+
+    fi
+
+    name=$1
+    shift
+    ip=$1
+    shift
+
+    serial=$1
+    if [ "$serial" = "" ];
+    then
+        echo "R:FAIL wait_for_update - serial number not supplied"
+        return 1
+    fi
+
+    # Now poll the server looking for the new serial number
+
+    echo "I:$name waiting for SOA serial to change from $serial"
+    for i in 1 2 3 4 5 6 7 8 9 10 11 12
+    do
+        if [ $i -gt 1 ];
+        then
+            sleep 5
+        fi
+
+        new_serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
+        if [ "$new_serial" != "$serial" ];
+        then
+            echo "I:$name SOA serial was at $serial, now at $new_serial"
+            return 0
+        fi
+    done
+
+    echo "R:$name FAIL serial number has not updated"
+    return 1
+}
+
+
+
+# \brief Update server zone
+#
+# Reloads the example. zone in the BIND 9 IXFR server and waits a maximum of
+# one minute for it to be served.
+#
+# \arg $1 - Name of the server (ns1, nsx2 etc.)
+# \arg $2 - IP address of the server
+# \arg $3 - Zone file to load
+# \arg $* - Command to execute (which may be multiple tokens)
+#
+# \return 0 on success, 1 on failure (for which an error message will have
+#         been output).
+update_server_zone() {
+
+    # If the following checks fail, the code is wrong.
+
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL update_server_zone - name or ip address of server not supplied"
+        return 1
+    fi
+
+    name=$1
+    shift
+    ip=$1
+    shift
+
+    file=$1
+    shift
+    if [ "$file" = "" ];
+    then
+        echo "R:FAIL update_server_zone - new zone file not supplied"
+        return 1
+    fi
+
+    if [ ! -e $file ];
+    then
+        echo "R:FAIL update_server_zone - zone file does not exist: $file"
+        return 1
+    fi
+
+    # Record the current serial so that wait_for_update can detect when
+    # the reload has taken effect.
+    old_serial=`$DIG_SOA @$ip | $AWK '{print $3}'`
+
+    echo "I:$name IXFR server loading $file"
+    cp $file $name/db.example
+    do_rndc $name $ip reload
+    if [ $? -ne 0 ];
+    then
+        return 1    # Message will have already been output
+    fi
+
+    # Wait up to a minute for the new zone to be served.
+    wait_for_update $name $ip $old_serial
+    if [ $? -ne 0 ];
+    then
+        echo "R:$name FAIL IXFR server did not update zone after reload"
+        return 1
+    fi
+
+    return 0
+}
+
+# \brief Compare client and server SOAs
+#
+# Checks the SOAs of two systems and reports if they are not equal.
+#
+# \arg $1 Name of the IXFR server
+# \arg $2 IP of the IXFR server 
+# \arg $3 Name of the IXFR client
+# \arg $4 IP of the IXFR client
+#
+# \return 0 if the systems have the same SOA, 1 if not.  In the latter case,
+#         an error will be output.
+compare_soa() {
+
+    # If the following checks fail, the code is wrong.
+
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL compare_soa - name or ip address of server not supplied"
+        return 1
+    fi
+
+    server_name=$1
+    shift
+    server_ip=$1
+    shift
+
+    # The remaining arguments should identify the client; check them too.
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL compare_soa - name or ip address of client not supplied"
+        return 1
+    fi
+
+    client_name=$1
+    shift
+    client_ip=$1
+    shift
+
+    # The serial number is the third field of the SOA record as printed
+    # by $DIG_SOA.
+    client_serial=`$DIG_SOA @$client_ip | $AWK '{print $3}'`
+    server_serial=`$DIG_SOA @$server_ip | $AWK '{print $3}'`
+    if [ "$client_serial" != "$server_serial" ];
+    then
+        echo "R:FAIL client $client_name serial $client_serial not same as server $server_name serial $server_serial"
+        return 1
+    fi
+
+    return 0
+}
+
+# \brief Compare client and server zones
+#
+# Checks the zones of two systems and reports if they are not identical.
+#
+# The check is simplistic. Each zone is listed via "dig", after which comment
+# lines, blank lines and spaces/tabs are removed, and the result sorted.  The
+# output from each system is then compared.  They should be identical.
+#
+# \arg $1 Name of the IXFR server
+# \arg $2 IP of the IXFR server 
+# \arg $3 Name of the IXFR client
+# \arg $4 IP of the IXFR client
+#
+# \return 0 if the zones are the same, 1 if not.
+compare_zones() {
+
+    # If the following checks fail, the code is wrong.
+
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL compare_zones - name or ip address of server not supplied"
+        return 1
+    fi
+
+    server_name=$1
+    shift
+    server_ip=$1
+    shift
+
+    # The remaining arguments should identify the client; check them too.
+    check_name_ip $*
+    if [ $? -ne 0 ];
+    then
+        echo "R:FAIL compare_zones - name or ip address of client not supplied"
+        return 1
+    fi
+
+    client_name=$1
+    shift
+    client_ip=$1
+    shift
+
+    # List each zone with AXFR, strip comments, blank lines and all
+    # spaces/tabs, then sort so the two listings can be compared directly.
+    $DIG @$client_ip -p $DNS_PORT example. axfr | grep -v '^;' | grep -v '^$' \
+         | sed -e 's/ //g' -e 's/\t//g' | sort > client.dig
+    $DIG @$server_ip -p $DNS_PORT example. axfr | grep -v '^;' | grep -v '^$' \
+         | sed -e 's/ //g' -e 's/\t//g' | sort > server.dig
+    diff client.dig server.dig
+    if [ $? -eq 0 ];
+    then
+        echo "I:client and server zones identical"
+    else
+        echo "R:FAIL client $client_name zone not same as server $server_name zone"
+        return 1
+    fi
+
+    return 0
+}
diff --git a/tests/system/ixfr/named_noixfr.conf b/tests/system/ixfr/named_noixfr.conf
new file mode 100644
index 0000000..d171876
--- /dev/null
+++ b/tests/system/ixfr/named_noixfr.conf
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007, 2011  Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001  Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+        inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+	query-source address 10.53.0.1;
+	notify-source 10.53.0.1;
+	transfer-source 10.53.0.1;
+	port 53210;
+	pid-file "named.pid";
+	listen-on { 10.53.0.1; };
+	listen-on-v6 { none; };
+	recursion no;
+	ixfr-from-differences no;
+	notify explicit;
+	also-notify { 10.53.0.2; };
+	provide-ixfr no;
+};
+
+zone "example" {
+	type master;
+	file "db.example";
+};
diff --git a/tests/system/ixfr/named_nonotify.conf b/tests/system/ixfr/named_nonotify.conf
new file mode 100644
index 0000000..c08c212
--- /dev/null
+++ b/tests/system/ixfr/named_nonotify.conf
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007, 2011  Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001  Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+        inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+	query-source address 10.53.0.1;
+	notify-source 10.53.0.1;
+	transfer-source 10.53.0.1;
+	port 53210;
+	pid-file "named.pid";
+	listen-on { 10.53.0.1; };
+	listen-on-v6 { none; };
+	recursion no;
+	ixfr-from-differences yes;
+	notify no;
+};
+
+zone "example" {
+	type master;
+	file "db.example";
+};
diff --git a/tests/system/ixfr/named_notify.conf b/tests/system/ixfr/named_notify.conf
new file mode 100644
index 0000000..df45e6f
--- /dev/null
+++ b/tests/system/ixfr/named_notify.conf
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007, 2011  Internet Systems Consortium, Inc. ("ISC")
+ * Copyright (C) 2000, 2001  Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+include "../../../common/rndc.key";
+
+controls {
+        inet 10.53.0.1 port 9953 allow { any; } keys { rndc_key; };
+};
+
+options {
+	query-source address 10.53.0.1;
+	notify-source 10.53.0.1;
+	transfer-source 10.53.0.1;
+	port 53210;
+	pid-file "named.pid";
+	listen-on { 10.53.0.1; };
+	listen-on-v6 { none; };
+	recursion no;
+	ixfr-from-differences yes;
+	notify explicit;
+	also-notify { 10.53.0.2; };
+};
+
+zone "example" {
+	type master;
+	file "db.example";
+};
diff --git a/tests/system/run.sh b/tests/system/run.sh
deleted file mode 100755
index 4f852f4..0000000
--- a/tests/system/run.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/sh
-#
-# Copyright (C) 2004, 2007, 2010  Internet Systems Consortium, Inc. ("ISC")
-# Copyright (C) 2000, 2001  Internet Software Consortium.
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-#
-# Run a system test.
-#
-
-SYSTEMTESTTOP=.
-. $SYSTEMTESTTOP/conf.sh
-
-stopservers=true
-
-case $1 in
-   --keep) stopservers=false; shift ;;
-esac
-
-test $# -gt 0 || { echo "usage: $0 [--keep] test-directory" >&2; exit 1; }
-
-test=$1
-shift
-
-test -d $test || { echo "$0: $test: no such test" >&2; exit 1; }
-
-echo "S:$test:`date`" >&2
-echo "T:$test:1:A" >&2
-echo "A:System test $test" >&2
-
-if [ x$PERL = x ]
-then
-    echo "I:Perl not available.  Skipping test." >&2
-    echo "R:UNTESTED" >&2
-    echo "E:$test:`date`" >&2
-    exit 0;
-fi
-
-$PERL $TESTSOCK || {
-    echo "I:Network interface aliases not set up.  Skipping test." >&2;
-    echo "R:UNTESTED" >&2;
-    echo "E:$test:`date`" >&2;
-    exit 0;
-}
-
-
-# Check for test-specific prerequisites.
-test ! -f $test/prereq.sh || ( cd $test && sh prereq.sh "$@" )
-result=$?
-
-if [ $result -eq 0 ]; then
-    : prereqs ok
-else
-    echo "I:Prerequisites for $test missing, skipping test." >&2
-    [ $result -eq 255 ] && echo "R:SKIPPED" || echo "R:UNTESTED"
-    echo "E:$test:`date`" >&2
-    exit 0
-fi
-
-# Check for PKCS#11 support
-if
-    test ! -f $test/usepkcs11 || sh cleanpkcs11.sh
-then
-    : pkcs11 ok
-else
-    echo "I:Need PKCS#11 for $test, skipping test." >&2
-    echo "R:PKCS11ONLY" >&2
-    echo "E:$test:`date`" >&2
-    exit 0
-fi
-
-# Set up any dynamically generated test data
-if test -f $test/setup.sh
-then
-   ( cd $test && sh setup.sh "$@" )
-fi
-
-# Start name servers running
-$PERL start.pl $test || exit 1
-
-# Run the tests
-( cd $test ; sh tests.sh )
-
-status=$?
-
-if $stopservers
-then
-    :
-else
-    exit $status
-fi
-
-# Shutdown
-$PERL stop.pl $test
-
-status=`expr $status + $?`
-
-if [ $status != 0 ]; then
-	echo "R:FAIL"
-	# Don't clean up - we need the evidence.
-	find . -name core -exec chmod 0644 '{}' \;
-else
-	echo "R:PASS"
-
-	# Clean up.
-	if test -f $test/clean.sh
-	then
-	   ( cd $test && sh clean.sh "$@" )
-	fi
-fi
-
-echo "E:$test:`date`"
-
-exit $status
diff --git a/tests/system/run.sh.in b/tests/system/run.sh.in
new file mode 100755
index 0000000..619b865
--- /dev/null
+++ b/tests/system/run.sh.in
@@ -0,0 +1,125 @@
+#!/bin/sh
+#
+# Copyright (C) 2004, 2007, 2010  Internet Systems Consortium, Inc. ("ISC")
+# Copyright (C) 2000, 2001  Internet Software Consortium.
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Run a system test.
+#
+
+SYSTEMTOP=@abs_top_builddir@/tests/system   # @abs_top_builddir@ substituted by configure
+. $SYSTEMTOP/conf.sh
+
+stopservers=true    # set to false by --keep to leave servers running afterwards
+
+case $1 in
+   --keep) stopservers=false; shift ;;
+esac
+
+test $# -gt 0 || { echo "usage: $0 [--keep] test-directory" >&2; exit 1; }
+
+test=$1
+shift
+
+test -d $test || { echo "$0: $test: no such test" >&2; exit 1; }
+
+echo "S:$test:`date`" >&2
+echo "T:$test:1:A" >&2
+echo "A:System test $test" >&2
+
+if [ x$PERL = x ]
+then
+    echo "I:Perl not available.  Skipping test." >&2
+    echo "R:UNTESTED" >&2
+    echo "E:$test:`date`" >&2
+    exit 0;
+fi
+
+$PERL $TESTSOCK || {
+    echo "I:Network interface aliases not set up.  Skipping test." >&2;
+    echo "R:UNTESTED" >&2;
+    echo "E:$test:`date`" >&2;
+    exit 0;
+}
+
+
+# Check for test-specific prerequisites.
+test ! -f $test/prereq.sh || ( cd $test && sh prereq.sh "$@" )
+result=$?
+
+if [ $result -eq 0 ]; then
+    : prereqs ok
+else
+    echo "I:Prerequisites for $test missing, skipping test." >&2
+    [ $result -eq 255 ] && echo "R:SKIPPED" || echo "R:UNTESTED"
+    echo "E:$test:`date`" >&2
+    exit 0
+fi
+
+# Check for PKCS#11 support
+if
+    test ! -f $test/usepkcs11 || sh cleanpkcs11.sh
+then
+    : pkcs11 ok
+else
+    echo "I:Need PKCS#11 for $test, skipping test." >&2
+    echo "R:PKCS11ONLY" >&2
+    echo "E:$test:`date`" >&2
+    exit 0
+fi
+
+# Set up any dynamically generated test data
+if test -f $test/setup.sh
+then
+   ( cd $test && sh setup.sh "$@" )
+fi
+
+# Start name servers running
+$PERL $SYSTEMTOP/start.pl $test || exit 1
+
+# Run the tests
+( cd $test ; sh tests.sh )
+
+status=$?    # result of tests.sh, combined with shutdown status below
+
+if $stopservers
+then
+    :
+else
+    exit $status
+fi
+
+# Shutdown
+$PERL $SYSTEMTOP/stop.pl $test
+
+status=`expr $status + $?`    # non-zero if either the tests or the shutdown failed
+
+if [ $status != 0 ]; then
+	echo "R:FAIL"
+	# Don't clean up - we need the evidence.
+	find . -name core -exec chmod 0644 '{}' \;
+else
+	echo "R:PASS"
+
+	# Clean up.
+	if test -f $test/clean.sh
+	then
+	   ( cd $test && sh clean.sh "$@" )
+	fi
+fi
+
+echo "E:$test:`date`"
+
+exit $status
diff --git a/tests/system/start.pl b/tests/system/start.pl
index 56f00c4..daa4577 100755
--- a/tests/system/start.pl
+++ b/tests/system/start.pl
@@ -53,7 +53,7 @@ if ($server && !-d "$test/$server") {
 my $topdir = abs_path("$test/..");
 my $testdir = abs_path("$test");
 my $RUN_BIND10 = $ENV{'RUN_BIND10'};
-my $NAMED = $ENV{'NAMED'};
+my $NAMED = $ENV{'BIND9_NAMED'};
 my $LWRESD = $ENV{'LWRESD'};
 my $DIG = $ENV{'DIG'};
 my $PERL = $ENV{'PERL'};
@@ -182,7 +182,7 @@ sub start_server {
 		exit 1;
 	}
 
-	#               print "I:starting server $server\n";
+	print "I:starting server $server\n";
 
 	chdir "$testdir/$server";
 
diff --git a/tests/tools/badpacket/badpacket.cc b/tests/tools/badpacket/badpacket.cc
index 86bbc47..be393d5 100644
--- a/tests/tools/badpacket/badpacket.cc
+++ b/tests/tools/badpacket/badpacket.cc
@@ -18,6 +18,7 @@
 #include <config.h>
 
 #include <exceptions/exceptions.h>
+#include <log/logger_support.h>
 #include "command_options.h"
 #include "scan.h"
 
@@ -44,6 +45,7 @@ using namespace isc::badpacket;
 
 /// \brief Main Program
 int main(int argc, char* argv[]) {
+    isc::log::initLogger("badpacket");
 
     try {
         // Parse command




More information about the bind10-changes mailing list