BIND 10 trac1198, updated. 95b7c29f155b8fa21cf85c6d3afbe3f510db83d1 [1198] Merge branch 'master' into trac1198

BIND 10 source code commits bind10-changes at lists.isc.org
Fri Dec 9 13:25:55 UTC 2011


The branch, trac1198 has been updated
       via  95b7c29f155b8fa21cf85c6d3afbe3f510db83d1 (commit)
       via  a677ae91c9d7a5bd2ef8574f84e9f33f90c45e44 (commit)
       via  9b2b249d23576c999a65d8c338e008cabe45f0c9 (commit)
       via  45274924f3f25b70547809eeda5dbcbe230029b5 (commit)
       via  a6a35e9970e4937924c1e33c01f6bd7eaf1ed994 (commit)
       via  c8dc421c5cad0f3296174b44f8deccfb69dec43f (commit)
       via  91fb141bfb3aadfdf96f13e157a26636f6e9f9e3 (commit)
       via  37a11387baa321daec8311fc66d5d83e567886bd (commit)
       via  cee6a7af5f4f116ea89ecfde9a235dfe727bf207 (commit)
       via  a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2 (commit)
       via  7f5229256540479ef63d707a14d194c80099fab3 (commit)
       via  c0be7a6c0e12c78a7e02a2a3b3b259a3382b52bf (commit)
       via  e82cf73f21e06b1a301ff120fb5f73203d343111 (commit)
       via  777d6f3037738200a8a8426a0b957b18011d460a (commit)
       via  b705708aafbe566facc578a02b2f1cce44dff86f (commit)
       via  a56e72ce1bbc9d016a7ebd83eaba0aadbf2b41aa (commit)
       via  318dceaa39aa30ee9d394e1e096d4891f3bee490 (commit)
       via  25ac24557f8789f516ac7ffa1db831701ebf3c37 (commit)
       via  2a6b5e55caf422997b893e319db83855fe1709b1 (commit)
       via  83ecce54ed1ef5215f722e8339ae4a43f50ada5c (commit)
       via  8dcf5eebb1b81e6cdc963985daa6c80497ac8c16 (commit)
       via  662233a1483040da5dbc29dd9c9baf6bf0832223 (commit)
       via  0e3736c7c3e882ba3f0616b9d0877792edd73317 (commit)
       via  4b57a79735953705a82d8595a8ac541f7deb7a74 (commit)
       via  b41b7dc34a8a14339a1ff9daf1d705997d9abc43 (commit)
       via  6bda5426c6f8b4e9faefc2075575e1c98bc3907c (commit)
       via  6ff03bb9d631023175df99248e8cc0cda586c30a (commit)
       via  0c8c0f808a9ddc3a27a87f55964a965fb30f18ef (commit)
       via  df44752c7d69b865fa346215c1ec428153d5a3ca (commit)
       via  60fd293717cc45323cfb10cf06d5bd264fa083cc (commit)
       via  4220ef5ac9c8fdd4b506b3579f0e5eec98e3f3d8 (commit)
       via  56be59fbcdc0ba54ccea0d09d49ef28dace3d65d (commit)
       via  1341209064bc7afd8e720e3b12060239c368bcdd (commit)
       via  86a4ce45115dab4d3978c36dd2dbe07edcac02ac (commit)
       via  14a64484a3159a142f1b83a9830ac389a52f6a35 (commit)
       via  146203239c50d2a00069986944d4ec168f17b31f (commit)
       via  b7b90e50531bcbd2caaffe6b51aea8917a56f40d (commit)
       via  fd5713eae0276a3d623953c88fc6281aab0b71d3 (commit)
       via  4c0fd2ba248e5f925566e02724b34c85179a8c51 (commit)
       via  d408808ea8e4fe24a21d13992a88ad62c22634b8 (commit)
       via  cce182ed937f7294b2a11a3af5b2e03a380b7cd5 (commit)
       via  982fc000a3064515ac30f5457b71802577fec90d (commit)
       via  eb08c5acb5deafa28ae37032910cce9a385d2030 (commit)
       via  b2186bf05d8d9858b0b58cf9dca5b215afe447f5 (commit)
       via  88f94cf8e025558b14091af5050e2ce424237ea0 (commit)
       via  4db174c8f096e2b54b3a5d384a6cffc25b9d9024 (commit)
       via  49f1f1bc36042d4b4c27a002e963f400deb694d7 (commit)
       via  3bb1cc7d961930edc38d9f8b34d0cccd3d69dd96 (commit)
       via  6583a47dde1b851aee99de3c38c6331a22ede260 (commit)
       via  99033305fa90310135d37118a0d47df3f2223770 (commit)
       via  b7bbe25fdf0d0c168c24c904c82c7e04fc269bba (commit)
       via  862b0e38047101afef0f6d81ba3f0d74c9a51ea5 (commit)
       via  cf5e4481b1f1fb00e9897e4cd0527a9a707c4a63 (commit)
       via  0ae049de728692224b087e95a645f45f4a69cb68 (commit)
       via  1d5d7d8ea8d4dae23783b71e4a93165a36124663 (commit)
       via  696f576c4743130bc8a77844c700582d5faaf391 (commit)
       via  9f792ee32ba42a44291277d0577196e03a929738 (commit)
       via  7a1997b2d44d05c179adecad0bf68c1f7c19935f (commit)
       via  e580e44a38c8dcf02e6ff40ba5977f18c8200572 (commit)
       via  660cf410c0fb41587b977df992879f5dff934c19 (commit)
       via  b09fdcc6b45d4580b138cc9f59bfc051bd6ad360 (commit)
       via  4d97ef5cdb4833a7a36b6679c16338505b07d4e3 (commit)
       via  424f32864efcd2c647c6e5303125b6a8afb421ea (commit)
       via  11885ff9e91c98ff0c4e93d81fc2b3d47a02090d (commit)
       via  0c53ade72d9589d3d521937652af20f9d7a20f8e (commit)
       via  cc20ff993da1ddb1c6e8a98370438b45a2be9e0a (commit)
       via  2142e8e6f760c577b58747c515c38fcc10168e04 (commit)
       via  402c03afffde1e664c9dbd7b3c40e78a23b261c5 (commit)
       via  b3c9fffb335e7a42ff07a99016df46ccaf3dae97 (commit)
       via  50fdb098fc80146a714794cb9156ac03de909103 (commit)
       via  a4f7763150aa2ced077f67bddbaa39255bd2fbf9 (commit)
       via  d3792aa7fc6ca920c7f9a3f36318ea1160974850 (commit)
       via  9351ba5d5461f8f93a169a99a5e378416a970bd5 (commit)
       via  8376919647ef84268085bfdd56271714416a6405 (commit)
       via  4e636c3e9a365304fc463929cdddcd555dcb3ad2 (commit)
       via  73721a97b1f3741bf58bf774601fef99a4ecb54f (commit)
       via  6764e7da0d5c6967e5607fc6e31c112895ed1827 (commit)
       via  40cfd32c280020af33a28c1501380a17ce604175 (commit)
       via  f9cbe6fb6e0a3d3dc03218429f530e6c01920169 (commit)
       via  18128a99fd70d66eb09312dd8dfa0f0521033f97 (commit)
       via  e7019de8a8ec9ff562557c9fc2a0bd28a4c64829 (commit)
       via  173de1cea65293e5f7cfb904454ee4fa96c51e1d (commit)
       via  e533dc83ccb7bf541e53f753c28a52248d7b195b (commit)
       via  2ae72d76c74f61a67590722c73ebbf631388acbd (commit)
       via  1ecbd2b16b44b6439030fd245f951fe5a538ecc5 (commit)
       via  83ce13c2d85068a1bec015361e4ef8c35590a5d0 (commit)
       via  5f34cb40976859771ab996113f78937310e7bda5 (commit)
       via  8ee52fc304388aef162a819e2e59066bb70f0618 (commit)
       via  3fdce88046bdad392bd89ea656ec4ac3c858ca2f (commit)
       via  6690b8985d656aba3f25082cb62c9c02e5ad5a0b (commit)
       via  7715c727d25d6430cbdbd82e40bdb7b3fa2ea843 (commit)
       via  6d921ed561b6ef9d26273ca321dfa24622a982b5 (commit)
       via  144549c04afdc36a98c7530eaebafb2b3d38d545 (commit)
       via  a26b979adb54baabdf939ed1a7852b2ee9b8b93c (commit)
       via  eb703a7e5b3749ca95a43c7582c9cccde564f123 (commit)
       via  4464612807e6c4bd120298ca105b0503af0d3110 (commit)
       via  cbe600decbef4db82cb3b070e03b5702540af4aa (commit)
       via  c44075a40764cbb5dc37e9dd3666ce46bb8c7955 (commit)
       via  0b5da8bd0800bfa3744e23c367cee2c38de7a497 (commit)
       via  7ac21664665acee54a2a57331a2afc2a0c0a3530 (commit)
       via  96a32f0ab063cbcd98fae0d5a87bc286bb8a7498 (commit)
       via  7019db2a44f39897486eea618f4447c37dbabcf8 (commit)
       via  024808d2a40b695f6c7191398c5a3d2c39c26736 (commit)
       via  9df50bec4e691dc8cb724547659fb71caad656ab (commit)
       via  3a206ab523d4612676362274ae8c30def53ac15e (commit)
       via  15dffb02f179974c6726f16aff586c49eec8c7ca (commit)
       via  ad90525811869e2ff6fb5e24d38bf19e5743767e (commit)
       via  936d5cad35355e1785550f7150f90e688166f448 (commit)
       via  0737908f9e7cb615f80354131dca4df1a8c0bff6 (commit)
       via  d6d7a352b0b0df685f285cd413568b0e475339da (commit)
       via  82fdeb65eba233a63b4425c7c5528a6257b91703 (commit)
       via  5832af30821d4d4d077946b063b8f53056fa7e60 (commit)
       via  1d24818f927edb1840f673f1ba67d7a45d9ef1c2 (commit)
       via  d6d90c1976110dcfb94cba2c56086960054cdeae (commit)
       via  f06cabd31d8e43781e4e32bdfdf24c78931d3ca8 (commit)
       via  0e945f09e0e127e5097c32e5c84d96e34a18b3b6 (commit)
       via  c9be5877151f0564725d1cd9a20fe393fe7b422f (commit)
       via  560122414abc11fa2a39331734c607cc37a4e76c (commit)
       via  5468c7defd530b29696108cbda6d278b14be351b (commit)
       via  64853ae0cca2070a0536ee6f499084c8a9017fa2 (commit)
       via  1d8a592d1301b7e3a39c88ce1e001122db125307 (commit)
       via  9682bf18607745c83437cd4592d3289e68410772 (commit)
       via  20a6000a9f69476797477ca7af5fd83b8e236909 (commit)
       via  537af1705fc5c1695b4b601571f65ead81dc1289 (commit)
       via  1485c897a9e2c71ed2a33c8972c116a5f7e8e078 (commit)
       via  b7ac17da5405582098e98ed22bf122fe87658923 (commit)
       via  ada65c7115b9840f509f2b0c640368735fe69520 (commit)
       via  b8d14d2e45ee719e4e33adbecddafb4ae3aa4df1 (commit)
       via  966fdcc69001cd2562ca96b392b9a45e7c336224 (commit)
       via  567260cde6e21499ad4bf47789d538a929df5552 (commit)
       via  c789138250b33b6b08262425a08a2a0469d90433 (commit)
       via  b092df6f17e5d8f8f07e726fc4006e346417d49f (commit)
       via  d9b851b96c9fb3f56c4fe3a626f5c2b05bbb7a5f (commit)
       via  e665914a467810569a22e093a56ff5c711179143 (commit)
       via  5b64e839be2906b8950f5b1e42a3fadd72fca033 (commit)
       via  4dc636c8d46d857f617bfef2ae9444dce438cff4 (commit)
       via  eb06cb8dfea727c5d9366583581ca674d23c4c2e (commit)
       via  e35e9b8a1cef995079ef15b0321aa7b420139226 (commit)
       via  5de824f59cd2ba7e8cbb3cc58c4cd42c585c09c3 (commit)
       via  9300ad5a1030e50ab76ff8a6f87b4d91d2d2b124 (commit)
       via  8bab1dfcb1f33d58bf64f4c86ca7ba860b57cc76 (commit)
       via  5e07929601d0799df76eaaf3ac5165b634efc556 (commit)
       via  81f62344db074bc5eea3aaf3682122fdec6451ad (commit)
       via  eb6053d466fcea08fa66205d598d316e550863c8 (commit)
       via  42d4a37a121ea7df3440268fe15995267fb66b12 (commit)
       via  1b186f0a6fc242fa6dff08944ef43b60010d3631 (commit)
       via  1825a24fe6fcda419cf2cdcd05180aa1b18ca526 (commit)
       via  fa7208a015545459cf56b03001fa1e6681e52d3a (commit)
       via  34de4dab534c2ccc735f6c815aa0459553aa1153 (commit)
       via  b6568546ccdac044fd30200a54708f9418e7af9d (commit)
       via  936511f6e114f26bf86497466a7f61ef467bf5ad (commit)
       via  7f573f432cfca90d2f9409829f14b3645083b9af (commit)
       via  b586771730eb1d22330e3a4f46c6c596d6ab57da (commit)
       via  137abb738558ae9602f834890f477a924b520001 (commit)
       via  14c51c664a98beb4867728d528190aff335e6f27 (commit)
       via  6a4afc2165e4e6e692e71cb6795201c9df5afee2 (commit)
       via  047ea7f6cfa2677865dcf441726dcc3e082608a9 (commit)
       via  de9532ce586ac69ff58dad2096f23db0cb062639 (commit)
       via  a81436b0c8f9c3974ec3373f586c45e2cf03cb64 (commit)
       via  1a81569fb7c422d499f5a8eeef2d70d20e3284c6 (commit)
       via  07b884ef0f72044fa5a5fd661ab068794ff68ca6 (commit)
       via  51f3cb54492ef02e4951afb15a9c40ba0cdff4ce (commit)
       via  51c9278d000daee776c5e12456d8c4ea60ff5f21 (commit)
       via  207038a3ff7503cd2b2ab44238c71da55912bb4a (commit)
       via  97cf501e33b45c373aa12a3cb8ae76909d3522bc (commit)
       via  4ca30d27a1149bf5c445f382c4767b5c4e168d95 (commit)
       via  f6def2435fe72e00a782244461e8a186a4a23e63 (commit)
       via  c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca (commit)
       via  75fc5817606b892c8eeb4964dc2f3d2f044f5491 (commit)
       via  64c2d16fff1dd9e903378a55087843ad058791f5 (commit)
       via  7ab1afe9a76986c4f175c338fdd6a8076a9d6dc9 (commit)
       via  e99a54597a5bb6dde1a0240ab74ac010b5029afb (commit)
       via  f02b9adf8e899f9358a26e087cfb43a5d4657b07 (commit)
       via  3d5f2c3c14bcbf9cb7441f61ac8f84bceb8e6594 (commit)
       via  c1171699a2b501321ab54207ad26e5da2b092d63 (commit)
       via  8f5429e41cb99c615abc1de0ee549702ca638217 (commit)
       via  52fa244c12eb6d708b0260b5554396df5d00b079 (commit)
       via  3e1a6afcabbef24f9501d8f4e3ed27d824db4309 (commit)
       via  ce54ff203a48042d3fa085037a23b315ccc2ecca (commit)
       via  8fe1dbdadf8ce7aa288ae08320f315ab56433cb6 (commit)
       via  f76fdc8db2c0c9eba1603af7fa0272e7955e20d8 (commit)
       via  6247db1cab96103bc06a6a281963227084cfb68d (commit)
       via  a9dc55c6cc18e2ed28f79cfbbdf7408a64a04ca4 (commit)
       via  0dedcdb128646fdbf37be96f91076adda2f37c95 (commit)
       via  fc6a79af0d625ca18a2cdc3df91e86e8c1e02f9c (commit)
       via  1dddec95f5e398269b28473a094dd6ad00ce648b (commit)
       via  9150be5e5d0d6c0a46e44a0bbcdbd235c74bd6a7 (commit)
       via  f11f46b276646364fc115783ccc3d706510a2ee8 (commit)
       via  5dc772648fb9779359b4d409086d55745afccad9 (commit)
       via  151a4b05228e38b2031d095e438b63ae75dc0b76 (commit)
       via  3924f73fed9f8158918713b09672175f09a973e4 (commit)
       via  411a806a41666b522ed35552588789d114cc1390 (commit)
       via  ab3f90da16d31fc6833d869686e07729d9b8c135 (commit)
       via  e12070c73b529d348f64f8f6e24d75ce710a8a12 (commit)
       via  710e8207090f894b14eaa9834a9c6cd551ea950d (commit)
       via  80c131f5b0763753d199b0fb9b51f10990bcd92b (commit)
       via  a01eb512f67a14855fc9be9fff561c3c86634e0b (commit)
       via  635662711c673bbcfc8fac95c96cfdc33702ca94 (commit)
       via  15e23bca2cf7f266d32c6bb30a142a80ee543227 (commit)
       via  ec1cc2b4be6e19519644534889865a3ee2c81a8a (commit)
       via  277b80e0671586d8ace205cb53465b1f6f414466 (commit)
       via  a435f3ac50667bcb76dca44b7b5d152f45432b57 (commit)
       via  6dd270220a4bac70fa4cd6a898e331b658fe0af2 (commit)
       via  1bb5168b7014a83690d1bb363dbcc0fa6d8fd7f1 (commit)
       via  ddb6d109c0947f203eaa6265a22d2fb3b166db0b (commit)
       via  2eb9f486619e27aee0684f840c85d152b3ddfe0f (commit)
       via  71378c1048bb610c748788dabfd04e421f6b4ac0 (commit)
       via  de43982b90d0fafd6b4e1857e366a6cd983cfab7 (commit)
       via  77d69c99f2b3cc8ee627d8f73174ead9f03da412 (commit)
       via  3ff33cfedcca0cd1acb80a5cf2651f89403a82a9 (commit)
       via  cf297878998c80018ba9523a53ae4947fc7e6a5e (commit)
       via  52f4e6faf56afb5c0432f88d5b1528090530c62e (commit)
       via  13f108b25fbccc56b731bd5bcc505cdf48e91e91 (commit)
       via  4d3aef6a83965e26781d6b74f0ff913926845c7c (commit)
       via  fb33c8d379f9e75b82edafff45d4dc13fda62630 (commit)
       via  4f02b45248227dd98904b61bbcd2e6cff36b5fd6 (commit)
       via  54d9d7c1597df3bcdf47d07db040f63f7008c6a7 (commit)
       via  48c07943ac1dd24922f46cf970c214b5cf24813f (commit)
       via  bea7b0e3fde35a335bb9e6cf170b0fc240650275 (commit)
       via  9b1c64b7d164b6b27d126e55391b2bbafeaf8c00 (commit)
       via  96bf3ab5271347542e13b52e2c37b9c8810a6fad (commit)
       via  c59bb2dcd90a5d580a7f3c9e42a54a080f763add (commit)
       via  319bc2d65301606aa938363dcb30a8519755886e (commit)
       via  d953caeeaf821743ed27ef4a47a45bef66615dc9 (commit)
       via  5d382b4295b8455fae844a5ca94886788f6cb19b (commit)
       via  d08c42ad20f2c91bf64ef47ed893fa2aac4ff037 (commit)
       via  8b92bb931e29b7b1bbb8147cda4f7d0aac507ac1 (commit)
       via  08915b387e64f3cf9d9a86a5a21c4492db3a488c (commit)
       via  1d4541dfd067cd2f0c9e155049c2b7f9d70fa896 (commit)
       via  1f6edd11fbf7e0143f99f20fc714044b989b299a (commit)
       via  ecf6a71b5845c6710119dd97b500c7edeb3f44c2 (commit)
       via  a24c6579ab039afd67ecb50a71b9fc8eabf9b6c7 (commit)
       via  3647e8ff9c194c1c0a576558f4f49ba4ff2614e7 (commit)
       via  c3d71baca757b39e13968369e0afb39dd4472eb8 (commit)
       via  a9040d4aba8e3c01a77236c81f07e2b06b300918 (commit)
       via  35556de064c193779c3cd5e5b0fde583f4a8d598 (commit)
       via  c4f22c20ee19e1ffba43914671c059a434f4518c (commit)
       via  12b72af07f5e06cf172b115b0acba3fbe3554467 (commit)
       via  ecd9c5fc4b3cf747e2b5a221504feac3adeb236e (commit)
       via  fc0a31681f7a8e4198068be0038eb9a4f8a74ec7 (commit)
       via  d3db538710b6547cc2e04127fb5fc9d2d5a181f9 (commit)
       via  2ab2fd55d4a12d1469060a3657893121114e2e2f (commit)
       via  2dd7ee33a13a07a00e22fbc81ecb8b19b57efa8f (commit)
       via  5cea4cfbee9770f4299f5a701af89f7cbf977ef4 (commit)
       via  1af57091dc0c38cff538de2470275f25caeb2eab (commit)
       via  256c0a08483ac2bf396dfa8424b4c02f0681a0f4 (commit)
       via  8f74718cc2012ca68a44d9ed9996f479c6834101 (commit)
       via  5c92f567d93977bd56a7ed2898c7bee098b552ab (commit)
       via  956a0a589db0a8250ec94ece377657783ac15caf (commit)
       via  39def1d39c9543fc485eceaa5d390062edb97676 (commit)
       via  bcb432839cacdf10172d49dec94292871aee3526 (commit)
       via  164d651a0e4c1059c71f56b52ea87ac72b7f6c77 (commit)
       via  09f6d6281a4203a91dcfb6c56e240c06f11935b6 (commit)
       via  76fb414ea5257b639ba58ee336fae9a68998b30d (commit)
       via  e5f37058b67c641b8eb024bd48ca269ae9e41163 (commit)
       via  934a07b6d0ebec8bab258398894905de32878a8b (commit)
       via  40f6dd2b378f31f4ec561eeeac534874a02a8ae8 (commit)
       via  84fa061af28d72e51939039bfcbb04e1febc3cb1 (commit)
       via  b54f1b460285db4d6ae89dd716098a88363b1511 (commit)
       via  c1138d13b2692fa3a4f2ae1454052c866d24e654 (commit)
       via  35b1914ce6ab5481ce40f584729d0949746c2219 (commit)
       via  4df29b3303dbce85b8143d8d74935b3c9283fb31 (commit)
       via  33a956b09f22597d91929b22542913412757e279 (commit)
       via  ed91f985331705fc345bec838697c9bda4b6b7e4 (commit)
       via  1219d81b49e51adece77dc57b5902fa1c6be1407 (commit)
       via  8380ccceca1b8412fbc6742cb37dbd7de843ac50 (commit)
       via  38d84c59fbc097e57d03ac10d6a83edc63c4cffa (commit)
       via  c0cc183880fc5e1949bcc97585c20ac2ab21e281 (commit)
       via  2d85e22f10321fbc5b9cd12f70e90907cb01830f (commit)
       via  1c9f121360e6e612d02d365d70bd0843f8f93457 (commit)
       via  d142274062ed21d53f9e0b2a85531c935580013c (commit)
       via  5de9e8a440b6b4ed8c6bbce156d75b740ec4c1b5 (commit)
       via  631c5c2d24ba8be2b12930cc8267b2298414d563 (commit)
       via  1b3e21e08311d84d649a2780471e9a8b46143dca (commit)
       via  ddf219d781a40764999bd8b19c80f607c2783b57 (commit)
       via  4a68215905542025570f06fcc703fa44d6b37cfd (commit)
       via  315f4999df039dbb2baa77ee12afa0dfbe01dc25 (commit)
       via  7344d2788cd06e54ca7ca3e3a3f69010dac80670 (commit)
       via  ce546dddcbbf7efc4778c1d0d4210ca139ed5bf9 (commit)
       via  fa89a0798d166574e089b38d7bd43a701eda5467 (commit)
       via  12b1a920f219e627bb5860f0a0217cc5c86749e5 (commit)
       via  cd342dae58399be6cdfad55a466a76ee385ccb08 (commit)
       via  f9e81512329b71d6b5d94bafa789c63e763b2a72 (commit)
       via  226dc3ee718e1320079d6c6d8642e0f0dda1bdef (commit)
       via  962a91763b9ef79e887e52e22fa23462ff7d680e (commit)
       via  170936d47b2e9ad3d5c3ceabf86026fca9795150 (commit)
       via  dbf32272f3b76b90678add39038fb6978c03ab3e (commit)
       via  295732d42d2b0a9641edfa352087033d8eff2794 (commit)
       via  8279efec0dae2291665a99e4d489e8e5ef7a51c1 (commit)
       via  9f89f07adcc9ccdde454016f037076e04eb791c1 (commit)
       via  fdefb47da0a5d7203496738ba03d4e1737e8149e (commit)
       via  93a5d45d9c1aa90249494608b8c2829059cc3b28 (commit)
       via  c1f5fb059e9c272dedc27a3f14fa8ed2fec71b95 (commit)
       via  fd1ae8e05771b151877ae3c082a7b3e3b32a20c7 (commit)
       via  21887dffc4cd692ce23bfff1685fba0e2c1e55b0 (commit)
       via  466a968426ed9062d86239560492edf7dc72ee02 (commit)
       via  a59f28758abdb92721e010956bd421148643377b (commit)
       via  e09910d37b783b182ae2dc83f6cb272bff68cbb6 (commit)
       via  648a187c5d7181019dc19531a1057bc3e6f70e96 (commit)
       via  16b7feca0339f67acae30eb67d913bd3ef0298be (commit)
       via  ff5154291678973eaa0483518302b74a62f0acba (commit)
       via  c4c93896137dd936066cd1a714569468bf248451 (commit)
       via  9bab697bc984a6565a6f0dfe8a981f4809edc91c (commit)
       via  ab406229e29b7cfc470142ee0166086bf70790a3 (commit)
       via  e24f557e8208f43a8ade0855395c87b175bc351c (commit)
       via  3f93372ba9416c9d759ea0c6d8981837c036448e (commit)
       via  fda23d6cf412c2a90df325c244f79811d939d3c7 (commit)
       via  32b1e0701a9b138321e510a432c5cdd49fa336c6 (commit)
       via  cd4fd339a084dbfb1e2d35d5c008260de9d48572 (commit)
       via  48ee64bfbde99ce88eb305d2a751283b42c826ad (commit)
       via  cfecb1acb98f45a12864b7730ea58afbeb674c7b (commit)
       via  9ab6902f20b57452eaecf8f737d37f8dedcd623a (commit)
       via  8c57956e16dd09b528cd11dbf4c2fa51e48da359 (commit)
       via  e84f2aa5e9e493aa7dadfbd3b31753b5837d9069 (commit)
       via  dabf62d5444fe3a1e55e72aa393e0dddf188df7b (commit)
       via  ca3d2d1badee8e5e6d3c1f73fb29afdcc7692fa6 (commit)
       via  94ec743d73153258d8a231e2e5126749ea00e3c8 (commit)
       via  dca136175cf0dde67a63f40953187ca60f90caad (commit)
       via  625aea76304c024102cb5065f910e5121b1641f7 (commit)
       via  a4c51111cc0fc28c6517a11f8ae88682ab8e6996 (commit)
       via  0878c77ba4bcbaeb509f2bb7c2d52ee62864dadc (commit)
       via  efeb506e624945c6f21755621897a088715045b7 (commit)
       via  fda514f6b5ff65648709273dc62f960d85f4e066 (commit)
       via  2afbc7d3564b16d49043d48fe5ed9dd343311861 (commit)
       via  ce28b51d36567d63b5258648f7fbe406baaa5677 (commit)
       via  9753568c850855beecaabf500aea33483369d64f (commit)
       via  0ad9b8c8482a134af7c47b64b412f642d08ce642 (commit)
       via  5a2d958780a4a671cd8df9080d99ff95dd16772d (commit)
       via  3a330862f1357a4e0edd570de5896785029f4530 (commit)
       via  075e3787986676c7491f157931b6f7da1773db0a (commit)
       via  7d2f07481169780071bf564223a20a219b550385 (commit)
       via  d5e189cf1573446503a4fafa3e909db60eb04623 (commit)
       via  d85912df5ef89ff95c3653403503f61d120a0761 (commit)
       via  0b6937d0e075e1192c41891ae138532f2c733b47 (commit)
       via  5371b694b6cc564c3f1899a935769dd024f38e56 (commit)
       via  837002896937febe208c141912fc4f8c3beaa2ab (commit)
       via  06d6be693064252ed2535fc8685ca4e7b8db0989 (commit)
       via  36dc8dd6f15a42f401ffa32829ed7c436e529eb3 (commit)
       via  cb1c34cd2ffb876819441b4869a66a4cb500a8ba (commit)
       via  c5117dc4d2fd89f1a66849713c6a3cd51735699f (commit)
       via  5d7004d0ac4fe553a61fd2eb99a8af3eb7324956 (commit)
       via  fc0fe98a085ece85e143188c5647740f95d347bc (commit)
       via  456933355bf3bc2db5a6c52ba4dc6d8e826ce6e1 (commit)
       via  a3a4e317a91c075f0d16de7d16cc652e508df101 (commit)
       via  96086ea69576acae7d59e1d7665f622bd526c7c1 (commit)
       via  7c229ebaca82e06899126f9b364fe524ec6d4b56 (commit)
       via  599ec7f889bba386c838ec85735b203514905d9d (commit)
       via  657349ae281dcdf737b187d0be2cd7d0e4fa92a7 (commit)
       via  a7505fac495a9746d8bf3e9a2f4a3aa8541b85c2 (commit)
       via  2cd7de7f848f743ee31c356fd7edc9231ba6ca3a (commit)
       via  1468dd9e7bc1e0a045cdab88d1db815cc7e2bd52 (commit)
       via  3582ccf1eb2093d34e944bcda5ea2069158349dc (commit)
       via  d64cd3aa3d095ad5f0e8054e8b2b2cabdab18d3f (commit)
       via  0b7c39d9dcd44dfba0caf6e9353f00f47bbe7e9c (commit)
       via  d23827556ec500284bd155cdb731213343030f53 (commit)
      from  c37c596c844d35dc25eb729a99666948c6af8a6b (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 95b7c29f155b8fa21cf85c6d3afbe3f510db83d1
Merge: c37c596c844d35dc25eb729a99666948c6af8a6b a677ae91c9d7a5bd2ef8574f84e9f33f90c45e44
Author: Stephen Morris <stephen at isc.org>
Date:   Fri Dec 9 13:25:17 2011 +0000

    [1198] Merge branch 'master' into trac1198
    
    Conflicts:
    	src/lib/datasrc/database.cc
    	src/lib/datasrc/datasrc_messages.mes

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |  199 ++++++-
 Makefile.am                                        |    2 +-
 compatcheck/Makefile.am                            |    8 +
 compatcheck/README                                 |    5 +
 compatcheck/sqlite3-difftbl-check.py.in            |   60 ++
 configure.ac                                       |  114 +++-
 doc/guide/bind10-guide.html                        |  241 +++++++--
 doc/guide/bind10-guide.txt                         |  205 ++++++-
 doc/guide/bind10-guide.xml                         |  245 ++++++++-
 doc/guide/bind10-messages.html                     |  482 +++++++++++++---
 doc/guide/bind10-messages.xml                      |  549 +++++++++++++++---
 src/bin/auth/auth_srv.cc                           |   24 +-
 src/bin/auth/benchmarks/Makefile.am                |    2 +-
 src/bin/auth/query.cc                              |   36 ++-
 src/bin/auth/query.h                               |   12 +
 src/bin/auth/tests/auth_srv_unittest.cc            |  155 ++++-
 src/bin/auth/tests/query_unittest.cc               |  126 ++++-
 src/bin/bind10/bind10.8                            |  220 +++++++-
 src/bin/bind10/bind10.xml                          |  200 +++++++-
 src/bin/bind10/bind10_messages.mes                 |   11 +
 src/bin/bind10/bind10_src.py.in                    |  460 +++++++++------
 src/bin/bind10/bob.spec                            |    4 +-
 src/bin/bind10/tests/bind10_test.py.in             |  487 +++++++++++++++-
 src/bin/dhcp6/.gitignore                           |    1 +
 src/bin/dhcp6/dhcp6_srv.cc                         |   30 +-
 src/bin/dhcp6/dhcp6_srv.h                          |   11 +-
 src/bin/dhcp6/iface_mgr.cc                         |  436 ++++++++++----
 src/bin/dhcp6/iface_mgr.h                          |  269 ++++++++--
 src/bin/dhcp6/tests/Makefile.am                    |    4 +-
 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc          |   23 +-
 src/bin/dhcp6/tests/iface_mgr_unittest.cc          |  264 +++++++--
 src/bin/resolver/tests/Makefile.am                 |    2 +-
 src/bin/xfrin/tests/Makefile.am                    |    1 +
 src/bin/xfrin/tests/testdata/example.com.sqlite3   |  Bin 11264 -> 12288 bytes
 src/bin/xfrin/tests/xfrin_test.py                  |  352 ++++++++++--
 src/bin/xfrin/xfrin.py.in                          |  342 ++++++++----
 src/bin/xfrin/xfrin_messages.mes                   |   69 ++-
 src/bin/xfrout/b10-xfrout.8                        |   13 +
 src/bin/xfrout/b10-xfrout.xml                      |   25 +
 src/bin/xfrout/tests/Makefile.am                   |    4 +-
 src/bin/xfrout/tests/testdata/creatediff.py        |   58 ++
 src/bin/xfrout/tests/testdata/example.com          |    2 +-
 src/bin/xfrout/tests/testdata/test.sqlite3         |  Bin 11264 -> 12288 bytes
 src/bin/xfrout/tests/xfrout_test.py.in             |  602 ++++++++++++++++----
 src/bin/xfrout/xfrout.py.in                        |  320 ++++++++---
 src/bin/xfrout/xfrout_messages.mes                 |  100 +++-
 src/lib/asiodns/io_fetch.cc                        |    4 -
 src/lib/asiolink/Makefile.am                       |    3 +
 src/lib/cryptolink/Makefile.am                     |    3 +-
 src/lib/cryptolink/tests/Makefile.am               |    4 +-
 src/lib/datasrc/Makefile.am                        |   13 +-
 src/lib/datasrc/client.h                           |   51 ++
 src/lib/datasrc/database.cc                        |  117 ++++-
 src/lib/datasrc/database.h                         |   17 +-
 src/lib/datasrc/datasrc_config.h.pre.in            |   31 +
 src/lib/datasrc/datasrc_messages.mes               |   63 ++
 src/lib/datasrc/factory.cc                         |   53 ++-
 src/lib/datasrc/factory.h                          |   11 +-
 src/lib/datasrc/memory_datasrc.cc                  |    7 +
 src/lib/datasrc/memory_datasrc.h                   |    4 +
 src/lib/datasrc/sqlite3_accessor.h                 |    1 -
 src/lib/datasrc/tests/Makefile.am                  |   25 +-
 src/lib/datasrc/tests/client_unittest.cc           |    7 +
 src/lib/datasrc/tests/database_unittest.cc         |  377 +++++++++++--
 src/lib/datasrc/tests/factory_unittest.cc          |   65 +++
 src/lib/datasrc/tests/testdata/Makefile.am         |    5 -
 src/lib/datasrc/zone.h                             |   92 +++
 src/lib/dhcp/Makefile.am                           |    1 +
 src/lib/dhcp/libdhcp.cc                            |   12 +-
 src/lib/dhcp/option.cc                             |   31 +-
 src/lib/dhcp/option.h                              |   27 +-
 src/lib/dhcp/option4_addrlst.cc                    |  135 +++++
 src/lib/dhcp/option4_addrlst.h                     |  167 ++++++
 src/lib/dhcp/option6_addrlst.cc                    |    6 +-
 src/lib/dhcp/option6_addrlst.h                     |   19 +-
 src/lib/dhcp/option6_ia.cc                         |    6 +-
 src/lib/dhcp/option6_ia.h                          |    2 +-
 src/lib/dhcp/option6_iaaddr.cc                     |    4 +-
 src/lib/dhcp/option6_iaaddr.h                      |    3 +-
 src/lib/dhcp/pkt4.cc                               |   53 +-
 src/lib/dhcp/pkt4.h                                |   24 +-
 src/lib/dhcp/tests/Makefile.am                     |    1 +
 src/lib/dhcp/tests/option4_addrlst_unittest.cc     |  273 +++++++++
 src/lib/dhcp/tests/option_unittest.cc              |    2 +
 src/lib/dhcp/tests/pkt4_unittest.cc                |   17 +-
 src/lib/dns/Makefile.am                            |    3 +
 src/lib/dns/python/Makefile.am                     |    1 +
 src/lib/dns/python/pydnspp.cc                      |   17 +
 src/lib/dns/python/rdata_python.cc                 |  210 +++++---
 src/lib/dns/python/rrset_python.cc                 |   97 ++--
 src/lib/dns/python/serial_python.cc                |  281 +++++++++
 src/lib/dns/python/serial_python.h                 |   64 ++
 src/lib/dns/python/tests/Makefile.am               |    1 +
 src/lib/dns/python/tests/rdata_python_test.py      |    8 +
 src/lib/dns/python/tests/serial_python_test.py     |  111 ++++
 src/lib/dns/rdata/generic/soa_6.cc                 |    4 +-
 src/lib/dns/rdata/generic/soa_6.h                  |    3 +-
 src/lib/dns/serial.cc                              |   76 +++
 src/lib/dns/serial.h                               |  155 +++++
 src/lib/dns/tests/Makefile.am                      |    7 +-
 src/lib/dns/tests/rdata_soa_unittest.cc            |    2 +-
 src/lib/dns/tests/serial_unittest.cc               |  179 ++++++
 src/lib/exceptions/exceptions.h                    |   11 +
 src/lib/log/Makefile.am                            |    3 +-
 src/lib/log/tests/Makefile.am                      |   10 +-
 src/lib/nsas/nameserver_entry.cc                   |   24 +-
 src/lib/nsas/nsas_messages.mes                     |   29 +-
 src/lib/python/isc/bind10/Makefile.am              |    3 +-
 src/lib/python/isc/bind10/component.py             |   64 ++-
 src/lib/python/isc/bind10/socket_cache.py          |  302 ++++++++++
 src/lib/python/isc/bind10/special_component.py     |   20 +-
 src/lib/python/isc/bind10/tests/Makefile.am        |    2 +-
 src/lib/python/isc/bind10/tests/component_test.py  |  125 ++++-
 .../python/isc/bind10/tests/sockcreator_test.py    |    3 -
 .../python/isc/bind10/tests/socket_cache_test.py   |  396 +++++++++++++
 src/lib/python/isc/datasrc/Makefile.am             |    3 +
 src/lib/python/isc/datasrc/client_inc.cc           |   56 ++
 src/lib/python/isc/datasrc/client_python.cc        |   58 ++-
 src/lib/python/isc/datasrc/datasrc.cc              |   41 ++
 src/lib/python/isc/datasrc/finder_inc.cc           |    5 +-
 src/lib/python/isc/datasrc/finder_python.cc        |   29 +-
 src/lib/python/isc/datasrc/journal_reader_inc.cc   |   80 +++
 .../python/isc/datasrc/journal_reader_python.cc    |  200 +++++++
 src/lib/python/isc/datasrc/journal_reader_python.h |   47 ++
 src/lib/python/isc/datasrc/sqlite3_ds.py           |    8 +
 src/lib/python/isc/datasrc/tests/Makefile.am       |    2 +
 src/lib/python/isc/datasrc/tests/datasrc_test.py   |  216 ++++++--
 .../datasrc/tests/testdata/test.sqlite3.nodiffs    |  Bin 43008 -> 43008 bytes
 src/lib/python/isc/log/log.cc                      |  188 +++----
 src/lib/python/isc/log/tests/log_test.py           |   31 +
 src/lib/python/isc/notify/notify_out.py            |   15 +-
 src/lib/python/isc/notify/tests/Makefile.am        |    1 +
 src/lib/python/isc/testutils/Makefile.am           |    2 +-
 src/lib/python/isc/testutils/rrset_utils.py        |   82 +++
 src/lib/python/isc/xfrin/diff.py                   |   18 +-
 src/lib/python/isc/xfrin/libxfrin_messages.mes     |   10 +
 src/lib/python/isc/xfrin/tests/diff_tests.py       |   26 +-
 src/lib/resolve/recursive_query.cc                 |  152 ++++--
 src/lib/resolve/resolve_messages.mes               |   97 +++-
 src/lib/resolve/response_classifier.h              |    4 +-
 src/lib/xfr/Makefile.am                            |    2 +
 src/lib/xfr/tests/Makefile.am                      |   25 +
 src/lib/xfr/tests/client_test.cc                   |   37 ++
 src/lib/{acl => xfr}/tests/run_unittests.cc        |    0 
 src/lib/xfr/xfrout_client.cc                       |    9 +-
 tests/lettuce/features/terrain/bind10_control.py   |    7 +-
 tests/lettuce/features/xfrin_bind10.feature        |    1 +
 tests/tools/badpacket/badpacket.cc                 |    2 +
 148 files changed, 10308 insertions(+), 1868 deletions(-)
 create mode 100644 compatcheck/Makefile.am
 create mode 100644 compatcheck/README
 create mode 100755 compatcheck/sqlite3-difftbl-check.py.in
 create mode 100755 src/bin/xfrout/tests/testdata/creatediff.py
 create mode 100644 src/lib/datasrc/datasrc_config.h.pre.in
 create mode 100644 src/lib/dhcp/option4_addrlst.cc
 create mode 100644 src/lib/dhcp/option4_addrlst.h
 create mode 100644 src/lib/dhcp/tests/option4_addrlst_unittest.cc
 create mode 100644 src/lib/dns/python/serial_python.cc
 create mode 100644 src/lib/dns/python/serial_python.h
 create mode 100644 src/lib/dns/python/tests/serial_python_test.py
 create mode 100644 src/lib/dns/serial.cc
 create mode 100644 src/lib/dns/serial.h
 create mode 100644 src/lib/dns/tests/serial_unittest.cc
 create mode 100644 src/lib/python/isc/bind10/socket_cache.py
 create mode 100644 src/lib/python/isc/bind10/tests/socket_cache_test.py
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_inc.cc
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.cc
 create mode 100644 src/lib/python/isc/datasrc/journal_reader_python.h
 copy src/lib/{ => python/isc}/datasrc/tests/testdata/test.sqlite3.nodiffs (100%)
 create mode 100644 src/lib/python/isc/testutils/rrset_utils.py
 create mode 100644 src/lib/xfr/tests/Makefile.am
 create mode 100644 src/lib/xfr/tests/client_test.cc
 copy src/lib/{acl => xfr}/tests/run_unittests.cc (100%)

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index c04e845..c59c9ec 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,34 +1,195 @@
+342.	[bug]		stephen
+	In the resolver, a FORMERR received from an upstream nameserver
+	now results in a SERVFAIL being returned as a response to the original
+	query.  Additional debug messages added to distinguish between
+	different errors in packets received from upstream nameservers.
+	(Trac #1383, git 9b2b249d23576c999a65d8c338e008cabe45f0c9)
+
+341.	[func]		tomek
+	libdhcp++: Support for handling both IPv4 and IPv6 added.
+	Also added support for binding IPv4 sockets.
+	(Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
+
+340.	[build]		jelte
+	Fixed several linker issues related to recent gcc versions, botan
+	and gtest.
+	(Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
+
+339.	[bug]		jinmei
+	libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
+	incorrectly propagated ASIO specific exceptions to the application
+	if the given file name was too long.  This could lead to
+	unexpected shut down of b10-auth.
+	(Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
+
+338.	[bug]		jinmei
+	b10-xfrin didn't check SOA serials of SOA and IXFR responses,
+	which resulted in unnecessary transfer or unexpected IXFR
+	timeouts (these issues were not overlooked but deferred to be
+	fixed until #1278 was completed).  Validation of responses to SOA
+	queries was tightened, too.
+	(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
+
+337.	[func]		tomek
+	libdhcp++: Support for DHCPv4 option that can store a single
+	address or a list of IPv4 addresses added. Support for END option
+	added.
+	(Trac #1350, git cc20ff993da1ddb1c6e8a98370438b45a2be9e0a)
+
+336.	[func]		jelte
+	libdns++ (and its python wrapper) now includes a class Serial, for 
+	SOA SERIAL comparison and addition. Operations on instances of this 
+	class follow the specification from RFC 1982. 
+	Rdata::SOA::getSerial() now returns values of this type (and not 
+	uint32_t).
+	(Trac #1278, git 2ae72d76c74f61a67590722c73ebbf631388acbd)
+
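A minimal sketch of the RFC 1982 rules that the Serial class described
above implements (comparison and addition on 32-bit serial numbers).
The function names here are hypothetical illustrations, not the actual
libdns++/pydnspp API:

    MAX32 = 2 ** 32

    def serial_add(s, n):
        # RFC 1982 addition wraps around modulo 2^32 (n must be < 2^31).
        return (s + n) % MAX32

    def serial_gt(s1, s2):
        # s1 is "greater" than s2 iff they differ and the forward
        # distance from s2 to s1 is less than 2^31.
        return s1 != s2 and ((s1 > s2 and s1 - s2 < 2 ** 31) or
                             (s1 < s2 and s2 - s1 > 2 ** 31))

    # e.g. serial_gt(1, 0xFFFFFFFF) is True: after the serial wraps
    # around, 1 is considered newer than 0xFFFFFFFF.
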
+335.	[bug]*		jelte
+	The DataSourceClientContainer class that dynamically loads 
+	datasource backend libraries no longer provides just a .so file name 
+	to its call to dlopen(), but passes it an absolute path.  This means
+	that which file is chosen is no longer a system implementation detail
+	that depends on [DY]LD_LIBRARY_PATH, should there be multiple
+	options (for instance, when test-running a new build while a
+	different version is installed).
+	These loadable libraries are also no longer installed in the default 
+	library path, but in a subdirectory of the libexec directory of the 
+	target ($prefix/libexec/[version]/backends).
+	This also removes the need to handle b10-xfrin and b10-xfrout as 
+	'special' hardcoded components, and they are now started as regular 
+	components as dictated by the configuration of the boss process.
+	(Trac #1292, git 83ce13c2d85068a1bec015361e4ef8c35590a5d0)
+
+334.	[bug]		jinmei
+	b10-xfrout could potentially create an overflow response message
+	(exceeding the 64KB max) or could create unnecessarily small
+	messages.  The former was actually unlikely to happen due to the
+	effect of name compression, and the latter was marginal and at least
+	shouldn't cause an interoperability problem, but these were still
+	potential problems and were fixed.
+	(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
+
+333.    [bug]		dvv
+	Solaris needs "-z now" to force non-lazy binding and prevent g++ static
+	initialization code from deadlocking.
+	(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
+
+332.    [bug]		vorner
+	C++ exceptions in the isc.dns.Rdata wrapper are now converted
+	to Python ones instead of just aborting the interpreter.
+	(Trac #1407, git 5b64e839be2906b8950f5b1e42a3fadd72fca033)
+
+bind10-devel-20111128 released on November 28, 2011
+
+331.	[bug]		shane
+	Fixed a bug in data source library where a zone with more labels
+	than an out-of-bailiwick name server would cause an exception to
+	be raised.
+	(Trac #1430, git 81f62344db074bc5eea3aaf3682122fdec6451ad)
+
+330.	[bug]		jelte
+	Fixed a bug in b10-auth where it would sometimes fail because it
+	tried to check for queued msgq messages before the session was
+	fully running.
+	(git c35d0dde3e835fc5f0a78fcfcc8b76c74bc727ca)
+
+329.	[doc]		vorner, jreed
+	Document the bind10 run control configuration in guide and
+	manual page.
+	(Trac #1341, git c1171699a2b501321ab54207ad26e5da2b092d63)
+
+328.	[func]		jelte
+	b10-auth now passes IXFR requests on to b10-xfrout, and no longer
+	responds to them with NOTIMPL.
+	(Trac #1390, git ab3f90da16d31fc6833d869686e07729d9b8c135)
+
+327.	[func]		jinmei
+	b10-xfrout now supports IXFR.  (Right now there is no user
+	configurable parameter about this feature; b10-xfrout will
+	always respond to IXFR requests according to RFC1995).
+	(Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
+
+326.	[build]*	jinmei
+	Added a check script for the SQLite3 schema version.  It will be
+	run at the beginning of 'make install', and if it detects an old
+	version of the schema, installation will stop.  You'll then need to
+	upgrade the database file by following the error message.
+	(Trac #1404, git a435f3ac50667bcb76dca44b7b5d152f45432b57)
+
+325.	[func]		jinmei
+	Python isc.datasrc: added interfaces for difference management:
+	DataSourceClient.get_updater() now has the 'journaling' parameter
+	to enable storing diffs to the data source, and a new class
+	ZoneJournalReader was introduced to retrieve them, which can be
+	created by the new DataSourceClient.get_journal_reader() method.
+	(Trac #1333, git 3e19362bc1ba7dc67a87768e2b172c48b32417f5,
+	git 39def1d39c9543fc485eceaa5d390062edb97676)
+
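A brief usage sketch for the interfaces named above.  Only
DataSourceClient, the 'journaling' parameter of get_updater() and
get_journal_reader() are taken from the entry; every concrete argument
list, constant and accessor below is an assumption made for
illustration and may differ from the real isc.datasrc API:

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient(
        'sqlite3', '{"database_file": "zone.sqlite3"}')
    zone = isc.dns.Name('example.org')

    # Assumed positional arguments: (zone, replace, journaling).  With
    # journaling enabled, diffs applied via the updater are stored.
    updater = client.get_updater(zone, False, True)
    # ... add_rrset()/delete_rrset() calls and commit() would go here ...

    # Assumed signature: zone plus begin/end SOA serials; assumed return
    # value: (result code, reader object).
    result, reader = client.get_journal_reader(zone, 2011120801, 2011120902)
    if result == isc.datasrc.ZoneJournalReader.SUCCESS:
        rrset = reader.get_next_diff()      # assumed accessor name
        while rrset is not None:
            print(rrset.to_text())
            rrset = reader.get_next_diff()
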
+324.	[bug]		jinmei
+	Fixed reference leak in the isc.log Python module.  Most of all
+	BIND 10 Python programs had memory leak (even though the pace of
+	leak may be slow) due to this bug.
+	(Trac #1359, git 164d651a0e4c1059c71f56b52ea87ac72b7f6c77)
+
+323.	[bug]		jinmei
+	b10-xfrout incorrectly skipped adding TSIG RRs to some
+	intermediate responses (when TSIG is to be used for the
+	responses).  While RFC2845 optionally allows skipping intermediate
+	TSIGs (as long as the digest for the skipped part was included
+	in a later TSIG), the underlying TSIG API doesn't support this
+	mode of signing.
+	(Trac #1370, git 76fb414ea5257b639ba58ee336fae9a68998b30d)
+
+322.	[func]		jinmei
+	datasrc: Added C++ API for retrieving difference of two versions
+	of a zone.  A new ZoneJournalReader class was introduced for this
+	purpose, and a corresponding factory method was added to
+	DataSourceClient.
+	(Trac #1332, git c1138d13b2692fa3a4f2ae1454052c866d24e654)
+
+321.	[func]*		jinmei
+	b10-xfrin now installs IXFR differences into the underlying data
+	source (if it supports journaling) so that the stored differences
+	can be used for subsequent IXFR-out transactions.
+	Note: this is a backward incompatibility change for older sqlite3
+	database files.  They need to be upgraded to have a "diffs" table.
+	(Trac #1376, git 1219d81b49e51adece77dc57b5902fa1c6be1407)
+
 320.	[func]*		vorner
-	The --brittle switch was removed from the bind10 executable. It didn't
-	work after the #213 change and the same effect can be accomplished by
-	declaring all components as core.
+	The --brittle switch was removed from the bind10 executable.
+	It didn't work after change #316 (Trac #213) and the same
+	effect can be accomplished by declaring all components as core.
 	(Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
 
 319.	[func]		naokikambe
 	b10-stats-httpd was updated. In addition of the access to all
-	statistics items of all modules, the specified item or the items of the
-	specified module name can be accessed. For example, the URI requested
-	by using the feature is showed as "/bind10/statistics/xml/Auth" or
+	statistics items of all modules, the specified item or the items
+	of the specified module name can be accessed.  For example, the
+	URI requested by using the feature is shown as
+	"/bind10/statistics/xml/Auth" or
 	"/bind10/statistics/xml/Auth/queries.tcp". The list of all possible
-	module names and all possible item names can be showed in the root
-	document, whose URI is "/bind10/statistics/xml". This change is not
-	only for the XML documents but also is for the XSD and XSL documents.
+	module names and all possible item names can be shown in the
+	root document, whose URI is "/bind10/statistics/xml".  This change
+	is not only for the XML documents but also is for the XSD and
+	XSL documents.
 	(Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
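
The statistics URIs mentioned above can be fetched with any HTTP
client; a minimal Python sketch follows (the listen address and port
are assumptions and depend on how b10-stats-httpd is configured
locally):

    import urllib.request

    BASE = 'http://127.0.0.1:8000'   # assumed b10-stats-httpd address/port

    # '/bind10/statistics/xml' lists all modules and items; a specific
    # item can be requested directly, as described in the entry above.
    for path in ('/bind10/statistics/xml',
                 '/bind10/statistics/xml/Auth/queries.tcp'):
        with urllib.request.urlopen(BASE + path) as resp:
            print(resp.read().decode())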
 
-318.    [func]      stephen
-	Add C++ API for accessing zone difference information in database-based
-	data sources.
+318.	[func]		stephen
+	Add C++ API for accessing zone difference information in
+	database-based data sources.
 	(Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
 
-317.    [func]      vorner
-	datasrc: the getUpdater method of DataSourceClient supports an optional
-	'journaling' parameter to indicate the generated updater to store diffs.
-	The database based derived class implements this extension.
+317.	[func]		vorner
+	datasrc: the getUpdater method of DataSourceClient supports an
+	optional 'journaling' parameter to indicate the generated updater
+	to store diffs.  The database based derived class implements this
+	extension.
 	(Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
 
 316.	[func]*		vorner
-	The configuration of what parts of the system run is more flexible now.
-	Everything that should run must have an entry in Boss/components.
+	The configuration of what parts of the system run is more
+	flexible now.  Everything that should run must have an
+	entry in Boss/components.
 	(Trac #213, git 08e1873a3593b4fa06754654d22d99771aa388a6)
 
 315.	[func]		tomek
@@ -98,7 +259,7 @@
 	automatically.
 	(Trac #1279, git cd3588c9020d0310f949bfd053c4d3a4bd84ef88)
 
-306.	[bug]		Stephen
+306.	[bug]		stephen
 	Boss process now waits for the configuration manager to initialize
 	itself before continuing with startup.  This fixes a race condition
 	whereby the Boss could start the configuration manager and then
diff --git a/Makefile.am b/Makefile.am
index 50aa6b9..cc91a56 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = doc src tests
+SUBDIRS = compatcheck doc src tests
 USE_LCOV=@USE_LCOV@
 LCOV=@LCOV@
 GENHTML=@GENHTML@
diff --git a/compatcheck/Makefile.am b/compatcheck/Makefile.am
new file mode 100644
index 0000000..029578d
--- /dev/null
+++ b/compatcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_SCRIPTS = sqlite3-difftbl-check.py
+
+# We're going to abuse install-data-local for a pre-install check.
+# This is to be considered a short term hack and is expected to be removed
+# in a near future version.
+install-data-local:
+	$(PYTHON) sqlite3-difftbl-check.py \
+	$(localstatedir)/$(PACKAGE)/zone.sqlite3
diff --git a/compatcheck/README b/compatcheck/README
new file mode 100644
index 0000000..8381e60
--- /dev/null
+++ b/compatcheck/README
@@ -0,0 +1,5 @@
+This directory is a collection of compatibility checker programs.
+They will be run before any other installation attempts on 'make install'
+to see if the installation causes any substantial compatibility problems
+with existing configurations.  If any checker program finds an issue,
+'make install' will stop at that point.
diff --git a/compatcheck/sqlite3-difftbl-check.py.in b/compatcheck/sqlite3-difftbl-check.py.in
new file mode 100755
index 0000000..e3b7b91
--- /dev/null
+++ b/compatcheck/sqlite3-difftbl-check.py.in
@@ -0,0 +1,60 @@
+#!@PYTHON@
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import os, sqlite3, sys
+from optparse import OptionParser
+
+usage = 'usage: %prog [options] db_file'
+parser = OptionParser(usage=usage)
+parser.add_option("-u", "--upgrade", action="store_true",
+                  dest="upgrade", default=False,
+                  help="Upgrade the database file [default: %default]")
+(options, args) = parser.parse_args()
+if len(args) == 0:
+    parser.error('missing argument')
+
+db_file = args[0]
+
+# If the file doesn't exist, there's nothing to do
+if not os.path.exists(db_file):
+    sys.exit(0)
+
+conn = sqlite3.connect(db_file)
+cur = conn.cursor()
+try:
+    # This can be anything that works iff the "diffs" table exists
+    cur.execute('SELECT name FROM diffs DESC LIMIT 1')
+except sqlite3.OperationalError as ex:
+    # If it fails with 'no such table', create a new one or fail with
+    # warning depending on the --upgrade command line option.
+    if str(ex) == 'no such table: diffs':
+        if options.upgrade:
+            cur.execute('CREATE TABLE diffs (id INTEGER PRIMARY KEY, ' +
+                        'zone_id INTEGER NOT NULL, ' +
+                        'version INTEGER NOT NULL, ' +
+                        'operation INTEGER NOT NULL, ' +
+                        'name STRING NOT NULL COLLATE NOCASE, ' +
+                        'rrtype STRING NOT NULL COLLATE NOCASE, ' +
+                        'ttl INTEGER NOT NULL, rdata STRING NOT NULL)')
+        else:
+            sys.stdout.write('Found an older version of SQLite3 DB file: ' +
+                             db_file + '\n' + "Perform '" + os.getcwd() +
+                             "/sqlite3-difftbl-check.py --upgrade " +
+                             db_file + "'\n" +
+                             'before continuing install.\n')
+            sys.exit(1)
+conn.close()
diff --git a/configure.ac b/configure.ac
index 2692ddb..e370e21 100644
--- a/configure.ac
+++ b/configure.ac
@@ -2,7 +2,7 @@
 # Process this file with autoconf to produce a configure script.
 
 AC_PREREQ([2.59])
-AC_INIT(bind10-devel, 20111021, bind10-dev at isc.org)
+AC_INIT(bind10-devel, 20111129, bind10-dev at isc.org)
 AC_CONFIG_SRCDIR(README)
 AM_INIT_AUTOMAKE
 AC_CONFIG_HEADERS([config.h])
@@ -96,6 +96,8 @@ case "$host" in
 	# Solaris requires special definitions to get some standard libraries
 	# (e.g. getopt(3)) available with common used header files.
 	CPPFLAGS="$CPPFLAGS -D_XPG4_2 -D__EXTENSIONS__"
+	# "now" binding is necessary to prevent deadlocks in C++ static initialization code
+	LDFLAGS="$LDFLAGS -z now"
 	;;
 *-apple-darwin*)
 	# libtool doesn't work perfectly with Darwin: libtool embeds the
@@ -478,23 +480,33 @@ else
     fi
 fi
 
-BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
 BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
 
 # We expect botan-config --libs to contain -L<path_to_libbotan>, but
 # this is not always the case.  As a heuristics workaround we add
-# -L`botan-config --prefix/lib` in this case.  Same for BOTAN_INCLUDES
-# (but using include instead of lib) below.
+# -L`botan-config --prefix/lib` in this case (if not present already).
+# Same for BOTAN_INCLUDES (but using include instead of lib) below.
 if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
-    echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
-        BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
+    echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
+        BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
     echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
         BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
 fi
+
+# botan-config script (and the way we call pkg-config) returns -L and -l
+# as one string, but we need them in separate values
+BOTAN_LDFLAGS=
+BOTAN_NEWLIBS=
+for flag in ${BOTAN_LIBS}; do
+    BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
+    BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+done
+
 # See python_rpath for some info on why we do this
 if test $rpath_available = yes; then
     BOTAN_RPATH=
-    for flag in ${BOTAN_LDFLAGS}; do
+    for flag in ${BOTAN_LIBS}; do
             BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
     done
 AC_SUBST(BOTAN_RPATH)
@@ -510,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
 fi
 
 AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_LIBS)
 AC_SUBST(BOTAN_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
-
+LIBS_SAVED="$LIBS"
+LIBS="$LIBS $BOTAN_LIBS"
 AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
         [AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -531,7 +543,7 @@ AC_LINK_IFELSE(
          AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
 )
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 # Check for log4cplus
 log4cplus_path="yes"
@@ -543,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
     AC_MSG_ERROR([Need log4cplus])
 elif test "${log4cplus_path}" != "yes" ; then
   LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
-  LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+  LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
 else
 # If not specified, try some common paths.
 	log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -551,21 +563,21 @@ else
 	do
 		if test -f $d/include/log4cplus/logger.h; then
 			LOG4CPLUS_INCLUDES="-I$d/include"
-			LOG4CPLUS_LDFLAGS="-L$d/lib"
+			LOG4CPLUS_LIBS="-L$d/lib"
 			break
 		fi
 	done
 fi
 
-LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
 
-AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_LIBS)
 AC_SUBST(LOG4CPLUS_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+LIBS_SAVED="$LIBS"
+LIBS="$LOG4CPLUS_LIBS $LIBS"
 
 AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
@@ -580,7 +592,7 @@ AC_LINK_IFELSE(
 )
 
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 #
 # Configure Boost header path
@@ -673,6 +685,13 @@ else
     AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
 fi
 
+# I can't get some of the #include <asio.hpp> right without this
+# TODO: find the real cause of asio/boost wanting pthreads
+# (this currently only occurs for src/lib/cc/session_unittests)
+PTHREAD_LDFLAGS=
+AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(MULTITHREADING_FLAG)
 
 #
 # Check availability of gtest, which will be used for unit tests.
@@ -709,6 +728,48 @@ then
 				GTEST_LDFLAGS="-L$dir/lib"
 				GTEST_LDADD="-lgtest"
 				GTEST_FOUND="true"
+				# There is no gtest-config script on this
+				# system, which is supposed to inform us
+				# whether we need pthreads as well (a 
+				# gtest compile-time option). So we still
+				# need to test that manually.
+				CPPFLAGS_SAVED="$CPPFLAGS"
+				CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
+				LDFLAGS_SAVED="$LDFLAGS"
+				LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
+				LIBS_SAVED=$LIBS
+				LIBS="$LIBS $GTEST_LDADD"
+				AC_MSG_CHECKING([Checking whether gtest tests need pthreads])
+				# First try to compile without pthreads
+				AC_TRY_LINK([
+					#include <gtest/gtest.h>
+					],[
+						int i = 0;
+						char* c = NULL;
+						::testing::InitGoogleTest(&i, &c);
+						return (0);
+					],
+					[ AC_MSG_RESULT(no) ],
+					[
+						LIBS="$SAVED_LIBS $GTEST_LDADD $PTHREAD_LDFLAGS"
+						# Now try to compile with pthreads
+						AC_TRY_LINK([
+							#include <gtest/gtest.h>
+							],[
+								int i = 0;
+								char* c = NULL;
+								::testing::InitGoogleTest(&i, &c);
+								return (0);
+							],
+							[ AC_MSG_RESULT(yes)
+							  GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
+							],
+							# Apparently we can't compile it at all
+							[ AC_MSG_ERROR(unable to compile with gtest) ])
+				])
+				CPPFLAGS=$CPPFLAGS_SAVED
+				LDFLAGS=$LDFLAGS_SAVED
+				LIBS=$LIBS_SAVED
 				break
 			fi
 		done
@@ -735,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
 fi
 PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
 
-# I can't get some of the #include <asio.hpp> right without this
-# TODO: find the real cause of asio/boost wanting pthreads
-# (this currently only occurs for src/lib/cc/session_unittests)
-PTHREAD_LDFLAGS=
-AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
-AC_SUBST(PTHREAD_LDFLAGS)
-
-AC_SUBST(MULTITHREADING_FLAG)
-
 #
 # ASIO: we extensively use it as the C++ event management module.
 #
@@ -816,6 +868,7 @@ AM_CONDITIONAL(INSTALL_CONFIGURATIONS, test x$install_configurations = xyes || t
 AC_CONFIG_FILES([Makefile
                  doc/Makefile
                  doc/guide/Makefile
+                 compatcheck/Makefile
                  src/Makefile
                  src/bin/Makefile
                  src/bin/bind10/Makefile
@@ -909,6 +962,7 @@ AC_CONFIG_FILES([Makefile
                  src/lib/datasrc/tests/Makefile
                  src/lib/datasrc/tests/testdata/Makefile
                  src/lib/xfr/Makefile
+                 src/lib/xfr/tests/Makefile
                  src/lib/log/Makefile
                  src/lib/log/compiler/Makefile
                  src/lib/log/tests/Makefile
@@ -937,6 +991,7 @@ AC_CONFIG_FILES([Makefile
                  tests/tools/badpacket/tests/Makefile
                ])
 AC_OUTPUT([doc/version.ent
+           compatcheck/sqlite3-difftbl-check.py
            src/bin/cfgmgr/b10-cfgmgr.py
            src/bin/cfgmgr/tests/b10-cfgmgr_test.py
            src/bin/cmdctl/cmdctl.py
@@ -988,6 +1043,7 @@ AC_OUTPUT([doc/version.ent
            src/lib/python/bind10_config.py
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
+           src/lib/datasrc/datasrc_config.h.pre
            src/lib/log/tests/console_test.sh
            src/lib/log/tests/destination_test.sh
            src/lib/log/tests/init_logger_test.sh
@@ -1016,6 +1072,7 @@ AC_OUTPUT([doc/version.ent
            tests/system/ixfr/in-3/setup.sh
            tests/system/ixfr/in-4/setup.sh
           ], [
+           chmod +x compatcheck/sqlite3-difftbl-check.py
            chmod +x src/bin/cmdctl/run_b10-cmdctl.sh
            chmod +x src/bin/xfrin/run_b10-xfrin.sh
            chmod +x src/bin/xfrout/run_b10-xfrout.sh
@@ -1081,8 +1138,9 @@ dnl includes too
   Boost:         ${BOOST_INCLUDES}
   Botan:         ${BOTAN_INCLUDES}
                  ${BOTAN_LDFLAGS}
+                 ${BOTAN_LIBS}
   Log4cplus:     ${LOG4CPLUS_INCLUDES}
-                 ${LOG4CPLUS_LDFLAGS}
+                 ${LOG4CPLUS_LIBS}
   SQLite:        $SQLITE_CFLAGS
                  $SQLITE_LIBS
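
As an aside on the gtest probe added to configure.ac above: the two nested
AC_TRY_LINK checks amount to compiling and linking the small program below,
first without and then with the pthread library, and appending
$PTHREAD_LDFLAGS to GTEST_LDADD only when the second attempt is the one that
links. This is only an illustrative sketch; the file name and the compile
command in the comments are hypothetical, and the authoritative logic is the
configure.ac fragment itself.

    // gtest_link_probe.cc -- standalone equivalent of the AC_TRY_LINK body
    // above (hypothetical file name, for illustration only).
    //
    // Roughly:  c++ $GTEST_INCLUDES gtest_link_probe.cc $GTEST_LDADD
    // and, if that fails to link, the same command with $PTHREAD_LDFLAGS added.
    #include <gtest/gtest.h>

    int main(int, char**) {
        int i = 0;
        char* c = NULL;
        ::testing::InitGoogleTest(&i, &c);
        return (0);
    }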
 
diff --git a/doc/guide/bind10-guide.html b/doc/guide/bind10-guide.html
index 97ffb84..2972cdf 100644
--- a/doc/guide/bind10-guide.html
+++ b/doc/guide/bind10-guide.html
@@ -1,21 +1,21 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20110809. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
 lass="releaseinfo">This is the reference guide for BIND 10 version
-        20110809.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Guide</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the reference guide for BIND 10 version 20111021. The most up-to-date version of this document (in PDF, HTML, and plain text formats), along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Guide"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Guide</h1></div><div><h2 class="subtitle">Administrator Reference for BIND 10</h2></div><div><p c
 lass="releaseinfo">This is the reference guide for BIND 10 version
+        20111021.</p></div><div><p class="copyright">Copyright © 2010-2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
 	Internet Systems Consortium (ISC). It includes DNS libraries
 	and modular components for controlling authoritative and
 	recursive DNS servers.
       </p><p>
-        This is the reference guide for BIND 10 version 20110809.
+        This is the reference guide for BIND 10 version 20111021.
 	The most up-to-date version of this document (in PDF, HTML,
 	and plain text formats), along with other documents for
 	BIND 10, can be found at <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
-	</p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
 lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a></span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b
 10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfe
 rs</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></dd></dl><
 /div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
+	</p></div></div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#intro">1. Introduction</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></dd><dt><span class="chapter"><a href="#installation">2. Installation</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436567">Building Requirements</a></span></dt><dt><span class="section"><a href="#quickstart">Quick start</a></span></dt><dt><span class="section"><a href="#install">Installation from source</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229436859">Download Tar File</a></span></dt><dt><span c
 lass="section"><a href="#id1168229436878">Retrieve from Git</a></span></dt><dt><span class="section"><a href="#id1168229436939">Configure before the build</a></span></dt><dt><span class="section"><a href="#id1168229437037">Build</a></span></dt><dt><span class="section"><a href="#id1168229437052">Install</a></span></dt><dt><span class="section"><a href="#id1168229437076">Install Hierarchy</a></span></dt></dl></dd></dl></dd><dt><span class="chapter"><a href="#bind10">3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></a></span></dt><dd><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></dd><dt><span class="chapter"><a href="#msgq">4. Command channel</a></span></dt><dt><span class="chapter"><a href="#cfgmgr">5. Configuration manager</a></span></dt><dt><span class="chapter"><a href="#cmdctl">6. Remote control daemon</a><
 /span></dt><dd><dl><dt><span class="section"><a href="#cmdctl.spec">Configuration specification for b10-cmdctl</a></span></dt></dl></dd><dt><span class="chapter"><a href="#bindctl">7. Control and configure user interface</a></span></dt><dt><span class="chapter"><a href="#authserver">8. Authoritative Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrin">9. Incoming Zone Transfers</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Ma
 nually</a></span></dt></dl></dd><dt><span class="chapter"><a href="#xfrout">10. Outbound Zone Transfers</a></span></dt><dt><span class="chapter"><a href="#zonemgr">11. Secondary Manager</a></span></dt><dt><span class="chapter"><a href="#resolverserver">12. Recursive Name Server</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></dd><dt><span class="chapter"><a href="#statistics">13. Statistics</a></span></dt><dt><span class="chapter"><a href="#logging">14. Logging</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><s
 pan class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></dd></dl></div><div class="list-of-tables"><p><b>List of Tables</b></p><dl><dt>3.1. <a href="#id1168229437338"></a></dt></dl></div><div class="chapter" title="Chapter 1. Introduction"><div class="titlepage"><div><div><h2 class="title"><a name="intro"></a>Chapter 1. Introduction</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229451238">Supported Platforms</a></span></dt><dt><span class="section"><a href="#id1168229451265">Required Software</a></span></dt><dt><span class="section"><a href="#starting_stopping">Starting and Stopping the Server</a></span></dt><dt><span class="section"><a href="#managing_once_running">Managing BIND 10</a></span></dt></dl></div><p>
       BIND is the popular implementation of a DNS server, developer
       interfaces, and DNS tools.
       BIND 10 is a rewrite of BIND 9.  BIND 10 is written in C++ and Python
       and provides a modular environment for serving and maintaining DNS.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         This guide covers the experimental prototype of
-        BIND 10 version 20110809.
+        BIND 10 version 20111021.
       </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         BIND 10 provides a EDNS0- and DNSSEC-capable
         authoritative DNS server and a caching recursive name server
@@ -315,11 +315,11 @@
                 <code class="filename">var/bind10-devel/</code> —
                 data source and configuration databases.
               </li></ul></div><p>
-        </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt></dl></div><p>
+        </p></div></div></div><div class="chapter" title="Chapter 3. Starting BIND10 with bind10"><div class="titlepage"><div><div><h2 class="title"><a name="bind10"></a>Chapter 3. Starting BIND10 with <span class="command"><strong>bind10</strong></span></h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#start">Starting BIND 10</a></span></dt><dt><span class="section"><a href="#bind10.config">Configuration of started processes</a></span></dt></dl></div><p>
       BIND 10 provides the <span class="command"><strong>bind10</strong></span> command which
       starts up the required processes.
       <span class="command"><strong>bind10</strong></span>
-      will also restart processes that exit unexpectedly.
+      will also restart some processes that exit unexpectedly.
       This is the only command needed to start the BIND 10 system.
     </p><p>
       After starting the <span class="command"><strong>b10-msgq</strong></span> communications channel,
@@ -327,17 +327,20 @@
       runs the configuration manager, and reads its own configuration.
       Then it starts the other modules.
     </p><p>
-      The <span class="command"><strong>b10-msgq</strong></span> and <span class="command"><strong>b10-cfgmgr</strong></span>
+      The <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span> and
+      <span class="command"><strong>b10-cfgmgr</strong></span>
       services make up the core. The <span class="command"><strong>b10-msgq</strong></span> daemon
       provides the communication channel between every part of the system.
       The <span class="command"><strong>b10-cfgmgr</strong></span> daemon is always needed by every
       module, if only to send information about themselves somewhere,
       but more importantly to ask about their own settings, and
-      about other modules.
-      The <span class="command"><strong>bind10</strong></span> master process will also start up
+      about other modules. The <span class="command"><strong>b10-sockcreator</strong></span> will
+      allocate sockets for the rest of the system.
+    </p><p>
+      In its default configuration, the <span class="command"><strong>bind10</strong></span>
+      master process will also start up
       <span class="command"><strong>b10-cmdctl</strong></span> for admins to communicate with the
-      system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service or
-      <span class="command"><strong>b10-resolver</strong></span> for recursive name service,
+      system, <span class="command"><strong>b10-auth</strong></span> for authoritative DNS service,
       <span class="command"><strong>b10-stats</strong></span> for statistics collection,
       <span class="command"><strong>b10-xfrin</strong></span> for inbound DNS zone transfers,
       <span class="command"><strong>b10-xfrout</strong></span> for outbound DNS zone transfers,
@@ -351,7 +354,107 @@
           the process names for the Python-based daemons will be renamed
           to better identify them instead of just <span class="quote">“<span class="quote">python</span>”</span>.
           This is not needed on some operating systems.
-        </p></div></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
+        </p></div></div><div class="section" title="Configuration of started processes"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="bind10.config"></a>Configuration of started processes</h2></div></div></div><p>
+        The processes to be started can be configured, with the exception
+        of the <span class="command"><strong>b10-sockcreator</strong></span>, <span class="command"><strong>b10-msgq</strong></span>
+        and <span class="command"><strong>b10-cfgmgr</strong></span>.
+      </p><p>
+        The configuration is in the Boss/components section. Each element
+        represents one component, which is an abstraction of a process
+        (currently there's also one component which doesn't represent
+        a process). If you didn't want to transfer out at all (your server
+        is a secondary only), you would just remove the corresponding component
+        from the set, like this, and the process would be stopped immediately
+        (and not started on the next startup):
+      </p><pre class="screen">> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+      </p><p>
+        To add a process to the set, let's say the resolver (which is not
+        started by default), you would do this:
+        </p><pre class="screen">> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+        Now, what does this mean? We add an entry called b10-resolver. It is
+        both the name used to reference this component in the configuration
+        and the name of the process to start. Then we set some parameters on
+        how to start it.
+      </p><p>
+        The special parameter is for components that need some kind of special
+        care during startup or shutdown. Unless specified, the component is
+        started in the usual way. This is the list of components that need to
+        be started in a special way, with the value of special used for them:
+        </p><div class="table"><a name="id1168229437338"></a><p class="title"><b>Table 3.1. </b></p><div class="table-contents"><table border="1"><colgroup><col align="left"><col align="left"><col align="left"></colgroup><thead><tr><th align="left">Component</th><th align="left">Special</th><th align="left">Description</th></tr></thead><tbody><tr><td align="left">b10-auth</td><td align="left">auth</td><td align="left">Authoritative server</td></tr><tr><td align="left">b10-resolver</td><td align="left">resolver</td><td align="left">The resolver</td></tr><tr><td align="left">b10-cmdctl</td><td align="left">cmdctl</td><td align="left">The command control (remote control interface)</td></tr><tr><td align="left">setuid</td><td align="left">setuid</td><td align="left">Virtual component, see below</td></tr></tbody></table></div></div><p><br class="table-break">
+      </p><p>
+	The kind specifies how a failure of the component should
+	be handled.  If it is set to <span class="quote">“<span class="quote">dispensable</span>”</span>
+	(the default unless you set something else), it will get
+	started again if it fails. If it is set to <span class="quote">“<span class="quote">needed</span>”</span>
+	and it fails at startup, the whole <span class="command"><strong>bind10</strong></span>
+	shuts down and exits with an error exit code. But if it fails
+	some time later, it is just started again. If you set it
+	to <span class="quote">“<span class="quote">core</span>”</span>, you indicate that the system is
+	not usable without the component, and if such a component
+	fails, the system shuts down no matter when the failure
+	happened.  This is the behaviour of the core components
+	(the ones you can't turn off), but you can declare any
+	other components as core as well if you wish (but you can
+	turn these off, they just can't fail).
+      </p><p>
+        The priority defines the order in which the components should start.
+        The ones with a higher number are started sooner than the ones with
+        a lower one. If you don't set it, 0 (zero) is used as the priority.
+      </p><p>
+        There are other parameters we didn't use in our example.
+	One of them is <span class="quote">“<span class="quote">address</span>”</span>. It is the address
+	used by the component on the <span class="command"><strong>b10-msgq</strong></span>
+	message bus. The special components already know their
+	address, but the usual ones don't. The address is by
+	convention the part after <span class="emphasis"><em>b10-</em></span>, with
+	the first letter capitalized (e.g. <span class="command"><strong>b10-stats</strong></span>
+	would have <span class="quote">“<span class="quote">Stats</span>”</span> as its address).
+
+      </p><p>
+        The last one is process. It is the name of the process to be started.
+        It defaults to the name of the component if not set, but you can use
+        this to override it.
+      </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+          This system allows you to start the same component multiple times
+          (by including it in the configuration with different names, but the
+          same process setting). However, the rest of the system doesn't expect
+          such a situation, so it would probably not do what you want. Such
+          support is yet to be implemented.
+        </p></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+	  The configuration is quite powerful, but that leaves
+	  a lot of room for mistakes. You could turn off
+	  <span class="command"><strong>b10-cmdctl</strong></span>, but then you couldn't
+	  change it back the usual way, as it would require it to
+	  be running (you would have to find and edit the configuration
+	  directly).  Also, some modules might have dependencies
+	  -- <span class="command"><strong>b10-stats-httpd</strong></span> needs
+	  <span class="command"><strong>b10-stats</strong></span>, <span class="command"><strong>b10-xfrout</strong></span>
+	  needs <span class="command"><strong>b10-auth</strong></span> to be running, etc.
+
+
+
+        </p><p>
+          In short, you should think twice before disabling something here.
+        </p></div><p>
+	Now, to the mysterious setuid virtual component. If you
+	use the <span class="command"><strong>-u</strong></span> option to start
+	<span class="command"><strong>bind10</strong></span> as root, but change the user
+	later, we need to start the <span class="command"><strong>b10-auth</strong></span> or
+	<span class="command"><strong>b10-resolver</strong></span> as root (until the socket
+	creator is finished). So we need to specify
+	the time when the switch from root to the given user happens,
+	and that's what the setuid component is for. The switch is
+	done at the time the setuid component would be started, as if
+	it were a process. The default configuration contains the
+	setuid component with priority 5; <span class="command"><strong>b10-auth</strong></span>
+	has priority 10, so it is started before the switch, and everything
+	else has no priority, so it is started after the switch.
+      </p></div></div><div class="chapter" title="Chapter 4. Command channel"><div class="titlepage"><div><div><h2 class="title"><a name="msgq"></a>Chapter 4. Command channel</h2></div></div></div><p>
         The BIND 10 components use the <span class="command"><strong>b10-msgq</strong></span>
         message routing daemon to communicate with other BIND 10 components.
         The <span class="command"><strong>b10-msgq</strong></span> implements what is called the
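
A hedged illustration of the component rules described in the hunk above:
components start in descending priority order, the default b10-msgq address
is the part of the name after "b10-" with the first letter capitalized, and
the kind decides whether a failure is fatal. The real implementation is in
the Python bind10 boss process; the C++ types and function names below are
invented for this sketch only.

    // Sketch of the Boss/components rules from the guide text above.
    // Illustrative only; not the actual bind10 implementation.
    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <string>
    #include <vector>

    struct Component {
        std::string name;       // e.g. "b10-stats"
        std::string kind;       // "dispensable" (default), "needed" or "core"
        int priority;           // defaults to 0 when not set
    };

    // Default address convention: "b10-stats" -> "Stats".
    std::string defaultAddress(const std::string& name) {
        std::string addr =
            (name.compare(0, 4, "b10-") == 0) ? name.substr(4) : name;
        if (!addr.empty()) {
            addr[0] = static_cast<char>(
                toupper(static_cast<unsigned char>(addr[0])));
        }
        return (addr);
    }

    // "core" failures always stop the system, "needed" ones only at startup,
    // "dispensable" components are simply restarted.
    bool failureIsFatal(const Component& c, bool during_startup) {
        if (c.kind == "core") {
            return (true);
        }
        if (c.kind == "needed") {
            return (during_startup);
        }
        return (false);
    }

    int main() {
        std::vector<Component> components = {
            {"b10-auth", "needed", 10},
            {"setuid", "dispensable", 5},   // virtual component (user switch)
            {"b10-xfrout", "dispensable", 0},
        };
        // Higher priority numbers are started sooner.
        std::sort(components.begin(), components.end(),
                  [](const Component& a, const Component& b) {
                      return (a.priority > b.priority);
                  });
        for (std::vector<Component>::const_iterator it = components.begin();
             it != components.end(); ++it) {
            std::cout << it->name << ": address " << defaultAddress(it->name)
                      << ", fatal at startup: "
                      << (failureIsFatal(*it, true) ? "yes" : "no") << "\n";
        }
        return (0);
    }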
@@ -507,12 +610,12 @@ shutdown
       the details and relays (over a <span class="command"><strong>b10-msgq</strong></span> command
       channel) the configuration on to the specified module.
     </p><p>
-    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437660">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229437725">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229437755">Loading Master Zones Files</a></span></dt></dl></div><p>
+    </p></div><div class="chapter" title="Chapter 8. Authoritative Server"><div class="titlepage"><div><div><h2 class="title"><a name="authserver"></a>Chapter 8. Authoritative Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438007">Server Configurations</a></span></dt><dt><span class="section"><a href="#id1168229438072">Data Source Backends</a></span></dt><dt><span class="section"><a href="#id1168229438171">Loading Master Zones Files</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-auth</strong></span> is the authoritative DNS server.
       It supports EDNS0 and DNSSEC. It supports IPv6.
       Normally it is started by the <span class="command"><strong>bind10</strong></span> master
       process.
-    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437660"></a>Server Configurations</h2></div></div></div><p>
+    </p><div class="section" title="Server Configurations"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438007"></a>Server Configurations</h2></div></div></div><p>
         <span class="command"><strong>b10-auth</strong></span> is configured via the
         <span class="command"><strong>b10-cfgmgr</strong></span> configuration manager.
         The module name is <span class="quote">“<span class="quote">Auth</span>”</span>.
@@ -532,7 +635,7 @@ This may be a temporary setting until then.
         </p><div class="variablelist"><dl><dt><span class="term">shutdown</span></dt><dd>Stop the authoritative DNS server.
               </dd></dl></div><p>
 
-      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437725"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+      </p></div><div class="section" title="Data Source Backends"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438072"></a>Data Source Backends</h2></div></div></div><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
         For the development prototype release, <span class="command"><strong>b10-auth</strong></span>
         supports a SQLite3 data source backend and in-memory data source
         backend.
@@ -546,7 +649,7 @@ This may be a temporary setting until then.
         The default is <code class="filename">/usr/local/var/</code>.)
   This data file location may be changed by defining the
   <span class="quote">“<span class="quote">database_file</span>”</span> configuration.
-      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437755"></a>Loading Master Zones Files</h2></div></div></div><p>
+      </p></div><div class="section" title="Loading Master Zones Files"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438171"></a>Loading Master Zones Files</h2></div></div></div><p>
        RFC 1035 style DNS master zone files may be imported
         into a BIND 10 data source by using the
         <span class="command"><strong>b10-loadzone</strong></span> utility.
@@ -575,7 +678,7 @@ This may be a temporary setting until then.
         If you reload a zone already existing in the database,
         all records from that prior zone disappear and a whole new set
         appears.
-      </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229437989">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438027">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438069">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
+      </p></div></div><div class="chapter" title="Chapter 9. Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title"><a name="xfrin"></a>Chapter 9. Incoming Zone Transfers</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438302">Configuration for Incoming Zone Transfers</a></span></dt><dt><span class="section"><a href="#id1168229438340">Enabling IXFR</a></span></dt><dt><span class="section"><a href="#id1168229438382">Trigger an Incoming Zone Transfer Manually</a></span></dt></dl></div><p>
       Incoming zones are transferred using the <span class="command"><strong>b10-xfrin</strong></span>
       process which is started by <span class="command"><strong>bind10</strong></span>.
       When received, the zone is stored in the corresponding BIND 10
@@ -593,7 +696,7 @@ This may be a temporary setting until then.
      In the current development release of BIND 10, incoming zone
      transfers are only available for SQLite3-based data sources,
      that is, they don't work for an in-memory data source.
-    </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229437989"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
+    </p></div><div class="section" title="Configuration for Incoming Zone Transfers"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438302"></a>Configuration for Incoming Zone Transfers</h2></div></div></div><p>
 	In practice, you need to specify a list of secondary zones to
 	enable incoming zone transfers for these zones (you can still
 	trigger a zone transfer manually, without a prior configuration
@@ -609,7 +712,7 @@ This may be a temporary setting until then.
 > <strong class="userinput"><code>config commit</code></strong></pre><p>
 
       (We assume there has been no zone configuration before).
-      </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438027"></a>Enabling IXFR</h2></div></div></div><p>
+      </p></div><div class="section" title="Enabling IXFR"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438340"></a>Enabling IXFR</h2></div></div></div><p>
         As noted above, <span class="command"><strong>b10-xfrin</strong></span> uses AXFR for
         zone transfers by default.  To enable IXFR for zone transfers
         for a particular zone, set the <strong class="userinput"><code>use_ixfr</code></strong>
@@ -631,7 +734,7 @@ This may be a temporary setting until then.
       make this selection automatically.
       These features will be implemented in a near future
       version, at which point we will enable IXFR by default.
-      </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438069"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
+      </p></div></div><div class="section" title="Trigger an Incoming Zone Transfer Manually"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438382"></a>Trigger an Incoming Zone Transfer Manually</h2></div></div></div><p>
 	To manually trigger a zone transfer to retrieve a remote zone,
 	you may use the <span class="command"><strong>bindctl</strong></span> utility.
 	For example, at the <span class="command"><strong>bindctl</strong></span> prompt run:
@@ -641,16 +744,53 @@ This may be a temporary setting until then.
       The <span class="command"><strong>b10-xfrout</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
       When the <span class="command"><strong>b10-auth</strong></span> authoritative DNS server
-      receives an AXFR request, <span class="command"><strong>b10-xfrout</strong></span>
-      sends the zone.
-      This is used to provide master DNS service to share zones
+      receives an AXFR or IXFR request, <span class="command"><strong>b10-auth</strong></span>
+      internally forwards the request to <span class="command"><strong>b10-xfrout</strong></span>,
+      which handles the rest of request processing.
+      This is used to provide primary DNS service to share zones
       to secondary name servers.
       The <span class="command"><strong>b10-xfrout</strong></span> is also used to send
-      NOTIFY messages to slaves.
+      NOTIFY messages to secondary servers.
+    </p><p>
+      A global or per zone <code class="option">transfer_acl</code> configuration
+      can be used to control accessibility of the outbound zone
+      transfer service.
+      By default, <span class="command"><strong>b10-xfrout</strong></span> allows any client to
+      perform zone transfers for any zones:
+    </p><pre class="screen">> <strong class="userinput"><code>config show Xfrout/transfer_acl</code></strong>
+Xfrout/transfer_acl[0]	{"action": "ACCEPT"}	any	(default)</pre><p>
+      You can change this, for example, to reject all transfer
+      requests by default while allowing requests for the transfer
+      of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
+    </p><pre class="screen">> <strong class="userinput"><code>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</code></strong>
+> <strong class="userinput"><code>config add Xfrout/zone_config</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/origin "example.com"</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</code></strong>
+<strong class="userinput"><code>                                                 {"action": "ACCEPT", "from": "2001:db8::1"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
+	In the above example the lines
+	for <code class="option">transfer_acl</code> were divided for
+	readability.  In the actual input it must be given on a single line.
+    </p></div><p>
+      If you want to require TSIG in access control, a separate TSIG
+      "key ring" must be configured specifically
+      for <span class="command"><strong>b10-xfrout</strong></span> as well as a system wide
+      key ring, both containing a consistent set of keys.
+      For example, to change the previous example so that it allows requests
+      from 192.0.2.1 signed by a TSIG with a key name of
+      "key.example", you'll need to do this:
+    </p><pre class="screen">> <strong class="userinput"><code>config set tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</code></strong>
+> <strong class="userinput"><code>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</code></strong>
+> <strong class="userinput"><code>config commit</code></strong></pre><p>
+      The first line of configuration defines a system wide key ring.
+      This is necessary because the <span class="command"><strong>b10-auth</strong></span> server
+      also checks TSIGs and it uses the system wide configuration.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.)
-     Access control is not yet provided.
+	In a future version, <span class="command"><strong>b10-xfrout</strong></span> will also
+	use the system wide TSIG configuration.
+	The way to specify zone specific configuration (ACLs, etc) is
+	likely to be changed, too.
     </p></div></div><div class="chapter" title="Chapter 11. Secondary Manager"><div class="titlepage"><div><div><h2 class="title"><a name="zonemgr"></a>Chapter 11. Secondary Manager</h2></div></div></div><p>
       The <span class="command"><strong>b10-zonemgr</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
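
A hedged sketch of how the Xfrout transfer_acl examples above might be
evaluated for a single request. Two details are assumed here rather than
stated in the text: entries within an ACL are tried in order with the first
match deciding, and a zone-specific transfer_acl, when configured, is used
for that zone instead of the global one (as the example.com example implies).
The types and function names are invented for illustration.

    // Illustrative-only evaluation of the transfer_acl examples above.
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    struct AclEntry {
        std::string action;   // "ACCEPT" or "REJECT"
        std::string from;     // source address; empty means "match any"
        std::string key;      // required TSIG key name; empty means "not required"
    };

    typedef std::vector<AclEntry> Acl;

    std::string evaluate(const Acl& global_acl,
                         const std::map<std::string, Acl>& zone_acls,
                         const std::string& zone,
                         const std::string& src,
                         const std::string& tsig_key) {
        std::map<std::string, Acl>::const_iterator found = zone_acls.find(zone);
        const Acl& acl = (found != zone_acls.end()) ? found->second : global_acl;
        for (size_t i = 0; i < acl.size(); ++i) {
            const bool from_ok = acl[i].from.empty() || acl[i].from == src;
            const bool key_ok = acl[i].key.empty() || acl[i].key == tsig_key;
            if (from_ok && key_ok) {
                return (acl[i].action);   // first matching entry decides
            }
        }
        return ("REJECT");                // nothing matched: refuse the transfer
    }

    int main() {
        // Global default changed to REJECT, as in the example above.
        Acl global_acl;
        global_acl.push_back(AclEntry());
        global_acl.back().action = "REJECT";

        // Per-zone ACL for example.com: accept 192.0.2.1 with key.example.
        std::map<std::string, Acl> zone_acls;
        AclEntry entry;
        entry.action = "ACCEPT";
        entry.from = "192.0.2.1";
        entry.key = "key.example";
        zone_acls["example.com"].push_back(entry);

        std::cout << evaluate(global_acl, zone_acls, "example.com",
                              "192.0.2.1", "key.example") << "\n";  // ACCEPT
        std::cout << evaluate(global_acl, zone_acls, "example.org",
                              "192.0.2.1", "key.example") << "\n";  // REJECT
        return (0);
    }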
@@ -665,7 +805,7 @@ This may be a temporary setting until then.
     </p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>
      Access control (such as allowing notifies) is not yet provided.
      The primary/secondary service is not yet complete.
-    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438327">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438512">Forwarding</a></span></dt></dl></div><p>
+    </p></div></div><div class="chapter" title="Chapter 12. Recursive Name Server"><div class="titlepage"><div><div><h2 class="title"><a name="resolverserver"></a>Chapter 12. Recursive Name Server</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438673">Access Control</a></span></dt><dt><span class="section"><a href="#id1168229438891">Forwarding</a></span></dt></dl></div><p>
       The <span class="command"><strong>b10-resolver</strong></span> process is started by
       <span class="command"><strong>bind10</strong></span>.
 
@@ -678,8 +818,13 @@ This may be a temporary setting until then.
       You may change this using <span class="command"><strong>bindctl</strong></span>, for example:
 
       </p><pre class="screen">
-> <strong class="userinput"><code>config set Boss/start_auth false</code></strong>
-> <strong class="userinput"><code>config set Boss/start_resolver true</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrout</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-xfrin</code></strong>
+> <strong class="userinput"><code>config remove Boss/components b10-auth</code></strong>
+> <strong class="userinput"><code>config add Boss/components b10-resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/special resolver</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/kind needed</code></strong>
+> <strong class="userinput"><code>config set Boss/components/b10-resolver/priority 10</code></strong>
 > <strong class="userinput"><code>config commit</code></strong>
 </pre><p>
 
@@ -699,7 +844,7 @@ This may be a temporary setting until then.
 </pre><p>
     </p><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
        as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
-       Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438327"></a>Access Control</h2></div></div></div><p>
+       Resolver/listen_on</code></strong></span>”</span> if needed.)</p><div class="section" title="Access Control"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438673"></a>Access Control</h2></div></div></div><p>
         By default, the <span class="command"><strong>b10-resolver</strong></span> daemon only accepts
         DNS queries from the localhost (127.0.0.1 and ::1).
         The <code class="option">Resolver/query_acl</code> configuration may
@@ -732,7 +877,7 @@ This may be a temporary setting until then.
 </pre><p>(Replace the <span class="quote">“<span class="quote"><em class="replaceable"><code>2</code></em></span>”</span>
        as needed; run <span class="quote">“<span class="quote"><strong class="userinput"><code>config show
        Resolver/query_acl</code></strong></span>”</span> if needed.)</p><div class="note" title="Note" style="margin-left: 0.5in; margin-right: 0.5in;"><h3 class="title">Note</h3><p>This prototype access control configuration
-      syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438512"></a>Forwarding</h2></div></div></div><p>
+      syntax may be changed.</p></div></div><div class="section" title="Forwarding"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438891"></a>Forwarding</h2></div></div></div><p>
 
         To enable forwarding, the upstream address and port must be
         configured to forward queries to, such as:
@@ -786,7 +931,7 @@ This may be a temporary setting until then.
     }
 }
        </pre><p>
-    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229438628">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229438638">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439154">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439328">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229439609">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229438628"></a>Logging configuration</h2></div></div></div><p>
+    </p></div><div class="chapter" title="Chapter 14. Logging"><div class="titlepage"><div><div><h2 class="title"><a name="logging"></a>Chapter 14. Logging</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="section"><a href="#id1168229439042">Logging configuration</a></span></dt><dd><dl><dt><span class="section"><a href="#id1168229439052">Loggers</a></span></dt><dt><span class="section"><a href="#id1168229439294">Output Options</a></span></dt><dt><span class="section"><a href="#id1168229439468">Example session</a></span></dt></dl></dd><dt><span class="section"><a href="#id1168229440023">Logging Message Format</a></span></dt></dl></div><div class="section" title="Logging configuration"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439042"></a>Logging configuration</h2></div></div></div><p>
 
 	The logging system in BIND 10 is configured through the
 	Logging module. All BIND 10 modules will look at the
@@ -795,7 +940,7 @@ This may be a temporary setting until then.
 
 
 
-      </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229438638"></a>Loggers</h3></div></div></div><p>
+      </p><div class="section" title="Loggers"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439052"></a>Loggers</h3></div></div></div><p>
 
 	  Within BIND 10, a message is logged through a component
 	  called a "logger". Different parts of BIND 10 log messages
@@ -816,7 +961,7 @@ This may be a temporary setting until then.
 	  (what to log), and the <code class="option">output_options</code>
 	  (where to log).
 
-        </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229438663"></a>name (string)</h4></div></div></div><p>
+        </p><div class="section" title="name (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439077"></a>name (string)</h4></div></div></div><p>
 	  Each logger in the system has a name, the name being that
 	  of the component using it to log messages. For instance,
 	  if you want to configure logging for the resolver module,
@@ -889,7 +1034,7 @@ This may be a temporary setting until then.
 	  <span class="quote">“<span class="quote">Auth.cache</span>”</span> logger will appear in the output
 	  with a logger name of <span class="quote">“<span class="quote">b10-auth.cache</span>”</span>).
 
-        </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439035"></a>severity (string)</h4></div></div></div><p>
+        </p></div><div class="section" title="severity (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439176"></a>severity (string)</h4></div></div></div><p>
 
           This specifies the category of messages logged.
 	  Each message is logged with an associated severity which
@@ -905,7 +1050,7 @@ This may be a temporary setting until then.
 
 
 
-        </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439086"></a>output_options (list)</h4></div></div></div><p>
+        </p></div><div class="section" title="output_options (list)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439227"></a>output_options (list)</h4></div></div></div><p>
 
 	  Each logger can have zero or more
 	  <code class="option">output_options</code>. These specify where log
@@ -915,7 +1060,7 @@ This may be a temporary setting until then.
 
           The other options for a logger are:
 
-        </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439102"></a>debuglevel (integer)</h4></div></div></div><p>
+        </p></div><div class="section" title="debuglevel (integer)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439243"></a>debuglevel (integer)</h4></div></div></div><p>
 
 	  When a logger's severity is set to DEBUG, this value
 	  specifies what debug messages should be printed. It ranges
@@ -924,7 +1069,7 @@ This may be a temporary setting until then.
 
           If severity for the logger is not DEBUG, this value is ignored.
 
-        </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439117"></a>additive (true or false)</h4></div></div></div><p>
+        </p></div><div class="section" title="additive (true or false)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439258"></a>additive (true or false)</h4></div></div></div><p>
 
 	  If this is true, the <code class="option">output_options</code> from
 	  the parent will be used. For example, if there are two
@@ -938,18 +1083,18 @@ This may be a temporary setting until then.
 
 
 
-      </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439154"></a>Output Options</h3></div></div></div><p>
+      </p></div></div><div class="section" title="Output Options"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439294"></a>Output Options</h3></div></div></div><p>
 
 	  The main settings for an output option are the
 	  <code class="option">destination</code> and a value called
 	  <code class="option">output</code>, the meaning of which depends on
 	  the destination that is set.
 
-        </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439169"></a>destination (string)</h4></div></div></div><p>
+        </p><div class="section" title="destination (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439309"></a>destination (string)</h4></div></div></div><p>
 
             The destination is the type of output. It can be one of:
 
-          </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439201"></a>output (string)</h4></div></div></div><p>
+          </p><div class="itemizedlist"><ul class="itemizedlist" type="disc"><li class="listitem"> console </li><li class="listitem"> file </li><li class="listitem"> syslog </li></ul></div></div><div class="section" title="output (string)"><div class="titlepage"><div><div><h4 class="title"><a name="id1168229439341"></a>output (string)</h4></div></div></div><p>
 
 	  Depending on what is set as the output destination, this
 	  value is interpreted as follows:
@@ -971,12 +1116,12 @@ This may be a temporary setting until then.
 
           The other options for <code class="option">output_options</code> are:
 
-        </p><div class="section" title="flush (true of false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439286"></a>flush (true of false)</h5></div></div></div><p>
+        </p><div class="section" title="flush (true or false)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439427"></a>flush (true or false)</h5></div></div></div><p>
 	    Flush buffers after each log message. Doing this will
 	    reduce performance but will ensure that if the program
 	    terminates abnormally, all messages up to the point of
 	    termination are output.
-          </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439296"></a>maxsize (integer)</h5></div></div></div><p>
+          </p></div><div class="section" title="maxsize (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439436"></a>maxsize (integer)</h5></div></div></div><p>
 	    Only relevant when destination is file, this is maximum
 	    file size of output files in bytes. When the maximum
 	    size is reached, the file is renamed and a new file opened.
@@ -985,11 +1130,11 @@ This may be a temporary setting until then.
             etc.)
           </p><p>
             If this is 0, no maximum file size is used.
-          </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439308"></a>maxver (integer)</h5></div></div></div><p>
+          </p></div><div class="section" title="maxver (integer)"><div class="titlepage"><div><div><h5 class="title"><a name="id1168229439449"></a>maxver (integer)</h5></div></div></div><p>
 	    Maximum number of old log files to keep around when
 	    rolling the output file. Only relevant when
 	    <code class="option">destination</code> is <span class="quote">“<span class="quote">file</span>”</span>.
-          </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439328"></a>Example session</h3></div></div></div><p>
+          </p></div></div></div><div class="section" title="Example session"><div class="titlepage"><div><div><h3 class="title"><a name="id1168229439468"></a>Example session</h3></div></div></div><p>
 
 	  In this example we want to set the global logging to
 	  write to the file <code class="filename">/var/log/my_bind10.log</code>,
@@ -1150,7 +1295,7 @@ Logging/loggers[0]/output_options[0]/maxver	8	integer	(modified)
 	  And every module will now be using the values from the
 	  logger named <span class="quote">“<span class="quote">*</span>”</span>.
 
-        </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229439609"></a>Logging Message Format</h2></div></div></div><p>
+        </p></div></div><div class="section" title="Logging Message Format"><div class="titlepage"><div><div><h2 class="title" style="clear: both"><a name="id1168229440023"></a>Logging Message Format</h2></div></div></div><p>
 	  Each message written by BIND 10 to the configured logging
 	  destinations comprises a number of components that identify
 	  the origin of the message and, if the message indicates
diff --git a/doc/guide/bind10-guide.txt b/doc/guide/bind10-guide.txt
index 619d56f..9c8ffbe 100644
--- a/doc/guide/bind10-guide.txt
+++ b/doc/guide/bind10-guide.txt
@@ -2,7 +2,7 @@
 
 Administrator Reference for BIND 10
 
-   This is the reference guide for BIND 10 version 20110809.
+   This is the reference guide for BIND 10 version 20111021.
 
    Copyright (c) 2010-2011 Internet Systems Consortium, Inc.
 
@@ -12,7 +12,7 @@ Administrator Reference for BIND 10
    Consortium (ISC). It includes DNS libraries and modular components for
    controlling authoritative and recursive DNS servers.
 
-   This is the reference guide for BIND 10 version 20110809. The most
+   This is the reference guide for BIND 10 version 20111021. The most
    up-to-date version of this document (in PDF, HTML, and plain text
    formats), along with other documents for BIND 10, can be found at
    http://bind10.isc.org/docs.
@@ -55,6 +55,8 @@ Administrator Reference for BIND 10
 
                 Starting BIND 10
 
+                Configuration of started processes
+
    4. Command channel
 
    5. Configuration manager
@@ -105,6 +107,10 @@ Administrator Reference for BIND 10
 
                 Logging Message Format
 
+   List of Tables
+
+   3.1.
+
 Chapter 1. Introduction
 
    Table of Contents
@@ -124,7 +130,7 @@ Chapter 1. Introduction
 
   Note
 
-   This guide covers the experimental prototype of BIND 10 version 20110809.
+   This guide covers the experimental prototype of BIND 10 version 20111021.
 
   Note
 
@@ -427,24 +433,28 @@ Chapter 3. Starting BIND10 with bind10
 
    Starting BIND 10
 
+   Configuration of started processes
+
    BIND 10 provides the bind10 command which starts up the required
-   processes. bind10 will also restart processes that exit unexpectedly. This
-   is the only command needed to start the BIND 10 system.
+   processes. bind10 will also restart some processes that exit unexpectedly.
+   This is the only command needed to start the BIND 10 system.
 
    After starting the b10-msgq communications channel, bind10 connects to it,
    runs the configuration manager, and reads its own configuration. Then it
    starts the other modules.
 
-   The b10-msgq and b10-cfgmgr services make up the core. The b10-msgq daemon
-   provides the communication channel between every part of the system. The
-   b10-cfgmgr daemon is always needed by every module, if only to send
-   information about themselves somewhere, but more importantly to ask about
-   their own settings, and about other modules. The bind10 master process
-   will also start up b10-cmdctl for admins to communicate with the system,
-   b10-auth for authoritative DNS service or b10-resolver for recursive name
-   service, b10-stats for statistics collection, b10-xfrin for inbound DNS
-   zone transfers, b10-xfrout for outbound DNS zone transfers, and
-   b10-zonemgr for secondary service.
+   The b10-sockcreator, b10-msgq and b10-cfgmgr services make up the core.
+   The b10-msgq daemon provides the communication channel between every part
+   of the system. The b10-cfgmgr daemon is always needed by every module, if
+   only to send information about themselves somewhere, but more importantly
+   to ask about their own settings, and about other modules. The
+   b10-sockcreator will allocate sockets for the rest of the system.
+
+   In its default configuration, the bind10 master process will also start up
+   b10-cmdctl for admins to communicate with the system, b10-auth for
+   authoritative DNS service, b10-stats for statistics collection, b10-xfrin
+   for inbound DNS zone transfers, b10-xfrout for outbound DNS zone
+   transfers, and b10-zonemgr for secondary service.
 
 Starting BIND 10
 
@@ -457,6 +467,110 @@ Starting BIND 10
    names for the Python-based daemons will be renamed to better identify them
    instead of just "python". This is not needed on some operating systems.
 
+Configuration of started processes
+
+   The processes to be started can be configured, with the exception of the
+   b10-sockcreator, b10-msgq and b10-cfgmgr.
+
+   The configuration is in the Boss/components section. Each element
+   represents one component, which is an abstraction of a process (currently
+   there's also one component which doesn't represent a process). If you
+   don't want to transfer out at all (your server is a slave only), you
+   would just remove the corresponding component from the set, like this,
+   and the process would be stopped immediately (and not started on the
+   next startup):
+
+ > config remove Boss/components b10-xfrout
+ > config commit
+
+   To add a process to the set, let's say the resolver (which is not started
+   by default), you would do this:
+
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
+ > config commit
+
+   Now, what this means: we add an entry called b10-resolver. It is both the
+   name used to reference this component in the configuration and the name
+   of the process to start. Then we set some parameters on how to start it.
+
+   The special parameter is for components that need some kind of special
+   care during startup or shutdown. Unless it is specified, the component is
+   started in the usual way. This is the list of components that need to be
+   started in a special way, with the value of special used for them:
+
+   Table 3.1.
+
+   +------------------------------------------------------------------------+
+   | Component    | Special  | Description                                  |
+   |--------------+----------+----------------------------------------------|
+   | b10-auth     | auth     | Authoritative server                         |
+   |--------------+----------+----------------------------------------------|
+   | b10-resolver | resolver | The resolver                                 |
+   |--------------+----------+----------------------------------------------|
+   | b10-cmdctl   | cmdctl   | The command control (remote control          |
+   |              |          | interface)                                   |
+   |--------------+----------+----------------------------------------------|
+   | setuid       | setuid   | Virtual component, see below                 |
+   +------------------------------------------------------------------------+
+
+   The kind specifies how a failure of the component should be handled. If it
+   is set to "dispensable" (the default unless you set something else), it
+   will get started again if it fails. If it is set to "needed" and it fails
+   at startup, the whole bind10 shuts down and exits with an error exit code.
+   But if it fails some time later, it is just started again. If you set it
+   to "core", you indicate that the system is not usable without the
+   component and if such a component fails, the system shuts down no matter
+   when the failure happened. This is the behaviour of the core components
+   (the ones you can't turn off), but you can declare any other components as
+   core as well if you wish (but you can turn these off, they just can't
+   fail).
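+
+   For instance, to explicitly mark an already configured component as
+   restartable but not essential (the choice of b10-xfrin here is only an
+   illustration, and "dispensable" is already the default), something like
+   the following should work:
+
+ > config set Boss/components/b10-xfrin/kind dispensable
+ > config commit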
+
+   The priority defines the order in which the components should start. The
+   ones with a higher number are started sooner than the ones with a lower
+   number. If you don't set it, 0 (zero) is used as the priority.
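+
+   For example, to have one component start before the others you could give
+   it a higher priority than theirs; the value 10 for b10-auth below simply
+   mirrors the default configuration described later in this section:
+
+ > config set Boss/components/b10-auth/priority 10
+ > config commit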
+
+   There are other parameters we didn't use in our example. One of them is
+   "address". It is the address used by the component on the b10-msgq message
+   bus. The special components already know their address, but the usual ones
+   don't. The address is by convention the part after b10-, with the first
+   letter capitalized (e.g. b10-stats would have "Stats" as its address).
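+
+   As a sketch only: if an ordinary component configured under the name
+   b10-stats ever needed its address set explicitly, following the convention
+   above it would look like this:
+
+ > config set Boss/components/b10-stats/address Stats
+ > config commit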
+
+   The last one is process. It is the name of the process to be started. It
+   defaults to the name of the component if not set, but you can use this to
+   override it.
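+
+   As a purely illustrative sketch (the binary name b10-resolver-testing is
+   made up), if the resolver entry from the example above had to run a
+   differently named binary, you could override process like this:
+
+ > config set Boss/components/b10-resolver/process b10-resolver-testing
+ > config commit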
+
+  Note
+
+   This system allows you to start the same component multiple times (by
+   including it in the configuration with different names, but the same
+   process setting). However, the rest of the system doesn't expect such a
+   situation, so it would probably not do what you want. Such support is yet
+   to be implemented.
+
+  Note
+
+   The configuration is quite powerful, but that leaves a lot of room for
+   mistakes. You could turn off b10-cmdctl, but then you couldn't change
+   it back the usual way, as that would require it to be running (you would
+   have to find and edit the configuration directly). Also, some modules
+   might have dependencies -- b10-stats-httpd needs b10-stats, b10-xfrout
+   needs b10-auth to be running, etc.
+
+   In short, you should think twice before disabling something here.
+
+   Now, to the mysterious setuid virtual component. If you use the -u option
+   to start the bind10 as root, but change the user later, we need to start
+   the b10-auth or b10-resolver as root (until the socket creator is
+   finished). So we need to specify the time when the switch from root to
+   the given user happens, and that's what the setuid component is for. The
+   switch is done at the time the setuid component would be started, if it
+   were a process. The default configuration contains the setuid component
+   with priority 5, b10-auth has 10 so it is started before the switch, and
+   everything else is without priority, so it is started after the switch.
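+
+   As a hedged sketch, if the setuid component had been removed and you
+   wanted to restore something like the default arrangement described above
+   (the special value comes from the table, the priority from the
+   description), it could be re-added as follows:
+
+ > config add Boss/components setuid
+ > config set Boss/components/setuid/special setuid
+ > config set Boss/components/setuid/priority 5
+ > config commit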
+
 Chapter 4. Command channel
 
    The BIND 10 components use the b10-msgq message routing daemon to
@@ -739,15 +853,55 @@ Trigger an Incoming Zone Transfer Manually
 Chapter 10. Outbound Zone Transfers
 
    The b10-xfrout process is started by bind10. When the b10-auth
-   authoritative DNS server receives an AXFR request, b10-xfrout sends the
-   zone. This is used to provide master DNS service to share zones to
-   secondary name servers. The b10-xfrout is also used to send NOTIFY
-   messages to slaves.
+   authoritative DNS server receives an AXFR or IXFR request, b10-auth
+   internally forwards the request to b10-xfrout, which handles the rest of
+   request processing. This is used to provide primary DNS service to share
+   zones to secondary name servers. The b10-xfrout is also used to send
+   NOTIFY messages to secondary servers.
+
+   A global or per zone transfer_acl configuration can be used to control
+   accessibility of the outbound zone transfer service. By default,
+   b10-xfrout allows any clients to perform zone transfers for any zones:
+
+ > config show Xfrout/transfer_acl
+ Xfrout/transfer_acl[0]  {"action": "ACCEPT"}    any     (default)
+
+   You can change this to, for example, rejecting all transfer requests by
+   default while allowing requests for the transfer of zone "example.com"
+   from 192.0.2.1 and 2001:db8::1 as follows:
+
+ > config set Xfrout/transfer_acl[0] {"action": "REJECT"}
+ > config add Xfrout/zone_config
+ > config set Xfrout/zone_config[0]/origin "example.com"
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},
+                                                  {"action": "ACCEPT", "from": "2001:db8::1"}]
+ > config commit
+
+  Note
+
+   In the above example the lines for transfer_acl were divided for
+   readability. In the actual input the value must be on a single line.
+
+   If you want to require TSIG in access control, a separate TSIG "key ring"
+   must be configured specifically for b10-xfrout as well as a system wide
+   key ring, both containing a consistent set of keys. For example, to change
+   the previous example to allow requests from 192.0.2.1 signed by a TSIG
+   with a key name of "key.example", you'll need to do this:
+
+ > config set tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]
+ > config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]
+ > config commit
+
+   The first line of configuration defines a system wide key ring. This is
+   necessary because the b10-auth server also checks TSIGs and it uses the
+   system wide configuration.
 
   Note
 
-   The current development release of BIND 10 only supports AXFR. (IXFR is
-   not supported.) Access control is not yet provided.
+   In a future version, b10-xfrout will also use the system wide TSIG
+   configuration. The way to specify zone specific configuration (ACLs, etc)
+   is likely to be changed, too.
 
 Chapter 11. Secondary Manager
 
@@ -777,8 +931,13 @@ Chapter 12. Recursive Name Server
    authoritative or resolver or both. By default, it starts the authoritative
    service. You may change this using bindctl, for example:
 
- > config set Boss/start_auth false
- > config set Boss/start_resolver true
+ > config remove Boss/components b10-xfrout
+ > config remove Boss/components b10-xfrin
+ > config remove Boss/components b10-auth
+ > config add Boss/components b10-resolver
+ > config set Boss/components/b10-resolver/special resolver
+ > config set Boss/components/b10-resolver/kind needed
+ > config set Boss/components/b10-resolver/priority 10
  > config commit
 
    The master bind10 will stop and start the desired services.
diff --git a/doc/guide/bind10-guide.xml b/doc/guide/bind10-guide.xml
index 21bb671..e61725f 100644
--- a/doc/guide/bind10-guide.xml
+++ b/doc/guide/bind10-guide.xml
@@ -706,7 +706,7 @@ Debian and Ubuntu:
       BIND 10 provides the <command>bind10</command> command which
       starts up the required processes.
       <command>bind10</command>
-      will also restart processes that exit unexpectedly.
+      will also restart some processes that exit unexpectedly.
       This is the only command needed to start the BIND 10 system.
     </para>
 
@@ -718,17 +718,22 @@ Debian and Ubuntu:
     </para>
 
     <para>
-      The <command>b10-msgq</command> and <command>b10-cfgmgr</command>
+      The <command>b10-sockcreator</command>, <command>b10-msgq</command> and
+      <command>b10-cfgmgr</command>
       services make up the core. The <command>b10-msgq</command> daemon
       provides the communication channel between every part of the system.
       The <command>b10-cfgmgr</command> daemon is always needed by every
       module, if only to send information about themselves somewhere,
       but more importantly to ask about their own settings, and
-      about other modules.
-      The <command>bind10</command> master process will also start up
+      about other modules. The <command>b10-sockcreator</command> will
+      allocate sockets for the rest of the system.
+    </para>
+
+    <para>
+      In its default configuration, the <command>bind10</command>
+      master process will also start up
       <command>b10-cmdctl</command> for admins to communicate with the
-      system, <command>b10-auth</command> for authoritative DNS service or
-      <command>b10-resolver</command> for recursive name service,
+      system, <command>b10-auth</command> for authoritative DNS service,
       <command>b10-stats</command> for statistics collection,
       <command>b10-xfrin</command> for inbound DNS zone transfers,
       <command>b10-xfrout</command> for outbound DNS zone transfers,
@@ -754,6 +759,159 @@ Debian and Ubuntu:
       </note>
 
     </section>
+    <section id="bind10.config">
+      <title>Configuration of started processes</title>
+      <para>
+        The processes to be started can be configured, with the exception
+        of the <command>b10-sockcreator</command>, <command>b10-msgq</command>
+        and <command>b10-cfgmgr</command>.
+      </para>
+
+      <para>
+        The configuration is in the Boss/components section. Each element
+        represents one component, which is an abstraction of a process
+        (currently there's also one component which doesn't represent
+        a process). If you don't want to transfer out at all (your server
+        is a slave only), you would just remove the corresponding component
+        from the set, like this, and the process would be stopped immediately
+        (and not started on the next startup):
+      <screen>> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config commit</userinput></screen>
+      </para>
+
+      <para>
+        To add a process to the set, let's say the resolver (which is not
+        started by default), you would do this:
+        <screen>> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
+> <userinput>config commit</userinput></screen></para>
+
+      <para>
+        Now, what this means: we add an entry called b10-resolver. It is both
+        the name used to reference this component in the configuration and the
+        name of the process to start. Then we set some parameters on how to
+        start it.
+      </para>
+
+      <para>
+        The special parameter is for components that need some kind of special
+        care during startup or shutdown. Unless it is specified, the component
+        is started in the usual way. This is the list of components that need
+        to be started in a special way, with the value of special used for
+        them:
+        <table>
+          <tgroup cols='3' align='left'>
+          <colspec colname='component'/>
+          <colspec colname='special'/>
+          <colspec colname='description'/>
+          <thead><row><entry>Component</entry><entry>Special</entry><entry>Description</entry></row></thead>
+          <tbody>
+            <row><entry>b10-auth</entry><entry>auth</entry><entry>Authoritative server</entry></row>
+            <row><entry>b10-resolver</entry><entry>resolver</entry><entry>The resolver</entry></row>
+            <row><entry>b10-cmdctl</entry><entry>cmdctl</entry><entry>The command control (remote control interface)</entry></row>
+            <row><entry>setuid</entry><entry>setuid</entry><entry>Virtual component, see below</entry></row>
+            <!-- TODO Either add xfrin and xfrout as well or clean up the workarounds in boss before the release -->
+          </tbody>
+          </tgroup>
+        </table>
+      </para>
+
+      <para>
+	The kind specifies how a failure of the component should
+	be handled.  If it is set to <quote>dispensable</quote>
+	(the default unless you set something else), it will get
+	started again if it fails. If it is set to <quote>needed</quote>
+	and it fails at startup, the whole <command>bind10</command>
+	shuts down and exits with an error exit code. But if it fails
+	some time later, it is just started again. If you set it
+	to <quote>core</quote>, you indicate that the system is
+	not usable without the component and if such a component
+	fails, the system shuts down no matter when the failure
+	happened.  This is the behaviour of the core components
+	(the ones you can't turn off), but you can declare any
+	other components as core as well if you wish (but you can
+	turn these off, they just can't fail).
+      </para>
+
+      <para>
+        The priority defines the order in which the components should start.
+        The ones with a higher number are started sooner than the ones with
+        a lower number. If you don't set it, 0 (zero) is used as the priority.
+      </para>
+
+      <para>
+        There are other parameters we didn't use in our example.
+	One of them is <quote>address</quote>. It is the address
+	used by the component on the <command>b10-msgq</command>
+	message bus. The special components already know their
+	address, but the usual ones don't. The address is by
+	convention the part after <emphasis>b10-</emphasis>, with
+	the first letter capitalized (e.g. <command>b10-stats</command>
+	would have <quote>Stats</quote> as its address).
+<!-- TODO: this should be simplified so we don't even have to document it -->
+      </para>
+
+<!-- TODO: what does "The special components already know their
+address, but the usual ones don't." mean? -->
+
+<!-- TODO: document params when is enabled -->
+
+      <para>
+        The last one is process. It is the name of the process to be started.
+        It defaults to the name of the component if not set, but you can use
+        this to override it.
+      </para>
+
+      <!-- TODO Add parameters when they work, not implemented yet-->
+
+      <note>
+        <para>
+          This system allows you to start the same component multiple times
+          (by including it in the configuration with different names, but the
+          same process setting). However, the rest of the system doesn't
+          expect such a situation, so it would probably not do what you want.
+          Such support is yet to be implemented.
+        </para>
+      </note>
+
+      <note>
+        <para>
+	  The configuration is quite powerful, but that leaves
+	  a lot of room for mistakes. You could turn off
+	  <command>b10-cmdctl</command>, but then you couldn't
+	  change it back the usual way, as that would require it to
+	  be running (you would have to find and edit the configuration
+	  directly).  Also, some modules might have dependencies
+	  -- <command>b10-stats-httpd</command> needs
+	  <command>b10-stats</command>, <command>b10-xfrout</command>
+	  needs <command>b10-auth</command> to be running, etc.
+
+<!-- TODO: should we define dependencies? -->
+
+        </para>
+        <para>
+          In short, you should think twice before disabling something here.
+        </para>
+      </note>
+
+      <para>
+	Now, to the mysterious setuid virtual component. If you
+	use the <command>-u</command> option to start the
+	<command>bind10</command> as root, but change the user
+	later, we need to start the <command>b10-auth</command> or
+	<command>b10-resolver</command> as root (until the socket
+	creator is finished).<!-- TODO --> So we need to specify
+	the time when the switch from root to the given user happens,
+	and that's what the setuid component is for. The switch is
+	done at the time the setuid component would be started, if
+	it were a process. The default configuration contains the
+	setuid component with priority 5, <command>b10-auth</command>
+	has 10 so it is started before the switch, and everything else
+	is without priority, so it is started after the switch.
+      </para>
+
+    </section>
 
   </chapter>
 
@@ -1369,20 +1527,72 @@ what if a NOTIFY is sent?
       The <command>b10-xfrout</command> process is started by
       <command>bind10</command>.
       When the <command>b10-auth</command> authoritative DNS server
-      receives an AXFR request, <command>b10-xfrout</command>
-      sends the zone.
-      This is used to provide master DNS service to share zones
+      receives an AXFR or IXFR request, <command>b10-auth</command>
+      internally forwards the request to <command>b10-xfrout</command>,
+      which handles the rest of request processing.
+      This is used to provide primary DNS service to share zones
       to secondary name servers.
       The <command>b10-xfrout</command> is also used to send
-      NOTIFY messages to slaves.
+      NOTIFY messages to secondary servers.
+    </para>
+
+    <para>
+      A global or per zone <option>transfer_acl</option> configuration
+      can be used to control accessibility of the outbound zone
+      transfer service.
+      By default, <command>b10-xfrout</command> allows any clients to
+      perform zone transfers for any zones:
+    </para>
+
+      <screen>> <userinput>config show Xfrout/transfer_acl</userinput>
+Xfrout/transfer_acl[0]	{"action": "ACCEPT"}	any	(default)</screen>
+
+    <para>
+      You can change this to, for example, reject all transfer
+      requests by default while allowing requests for the transfer
+      of zone "example.com" from 192.0.2.1 and 2001:db8::1 as follows:
     </para>
 
+      <screen>> <userinput>config set Xfrout/transfer_acl[0] {"action": "REJECT"}</userinput>
+> <userinput>config add Xfrout/zone_config</userinput>
+> <userinput>config set Xfrout/zone_config[0]/origin "example.com"</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1"},</userinput>
+<userinput>                                                 {"action": "ACCEPT", "from": "2001:db8::1"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
     <note><simpara>
-     The current development release of BIND 10 only supports
-     AXFR. (IXFR is not supported.)
-     Access control is not yet provided.
+	In the above example the lines
+	for <option>transfer_acl</option> were divided for
+	readability.  In the actual input the value must be on a single line.
     </simpara></note>
 
+    <para>
+      If you want to require TSIG in access control, a separate TSIG
+      "key ring" must be configured specifically
+      for <command>b10-xfrout</command> as well as a system wide
+      key ring, both containing a consistent set of keys.
+      For example, to change the previous example to allow requests
+      from 192.0.2.1 signed by a TSIG with a key name of
+      "key.example", you'll need to do this:
+    </para>
+
+    <screen>> <userinput>config set tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/tsig_keys/keys ["key.example:<base64-key>"]</userinput>
+> <userinput>config set Xfrout/zone_config[0]/transfer_acl [{"action": "ACCEPT", "from": "192.0.2.1", "key": "key.example"}]</userinput>
+> <userinput>config commit</userinput></screen>
+
+    <para>
+      The first line of configuration defines a system wide key ring.
+      This is necessary because the <command>b10-auth</command> server
+      also checks TSIGs and it uses the system wide configuration.
+    </para>
+
+    <note><simpara>
+	In a future version, <command>b10-xfrout</command> will also
+	use the system wide TSIG configuration.
+	The way to specify zone specific configuration (ACLs, etc) is
+	likely to be changed, too.
+    </simpara></note>
 
 <!--
 TODO:
@@ -1442,8 +1652,13 @@ what is XfroutClient xfr_client??
       You may change this using <command>bindctl</command>, for example:
 
       <screen>
-> <userinput>config set Boss/start_auth false</userinput>
-> <userinput>config set Boss/start_resolver true</userinput>
+> <userinput>config remove Boss/components b10-xfrout</userinput>
+> <userinput>config remove Boss/components b10-xfrin</userinput>
+> <userinput>config remove Boss/components b10-auth</userinput>
+> <userinput>config add Boss/components b10-resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/special resolver</userinput>
+> <userinput>config set Boss/components/b10-resolver/kind needed</userinput>
+> <userinput>config set Boss/components/b10-resolver/priority 10</userinput>
 > <userinput>config commit</userinput>
 </screen>
 
diff --git a/doc/guide/bind10-messages.html b/doc/guide/bind10-messages.html
index 237b7ad..f2f57f1 100644
--- a/doc/guide/bind10-messages.html
+++ b/doc/guide/bind10-messages.html
@@ -1,10 +1,10 @@
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20110809. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229460045"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
-        20110809.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"><title>BIND 10 Messages Manual</title><link rel="stylesheet" href="./bind10-guide.css" type="text/css"><meta name="generator" content="DocBook XSL Stylesheets V1.75.2"><meta name="description" content="BIND 10 is a Domain Name System (DNS) suite managed by Internet Systems Consortium (ISC). It includes DNS libraries and modular components for controlling authoritative and recursive DNS servers. This is the messages manual for BIND 10 version 20111021. The most up-to-date version of this document, along with other documents for BIND 10, can be found at ."></head><body bgcolor="white" text="black" link="#0000FF" vlink="#840084" alink="#0000FF"><div class="book" title="BIND 10 Messages Manual"><div class="titlepage"><div><div><h1 class="title"><a name="id1168229451102"></a>BIND 10 Messages Manual</h1></div><div><p class="releaseinfo">This is the messages manual for BIND 10 version
+        20111021.</p></div><div><p class="copyright">Copyright © 2011 Internet Systems Consortium, Inc.</p></div><div><div class="abstract" title="Abstract"><p class="title"><b>Abstract</b></p><p>BIND 10 is a Domain Name System (DNS) suite managed by
 	  Internet Systems Consortium (ISC). It includes DNS libraries
 	  and modular components for controlling authoritative and
 	  recursive DNS servers.
       </p><p>
-        This is the messages manual for BIND 10 version 20110809.
+        This is the messages manual for BIND 10 version 20111021.
 	    The most up-to-date version of this document, along with
 	    other documents for BIND 10, can be found at
         <a class="ulink" href="http://bind10.isc.org/docs" target="_top">http://bind10.isc.org/docs</a>.
@@ -107,6 +107,9 @@ This is a debug message, generated by the authoritative server when an
 attempt to parse the header of a received DNS packet has failed. (The
 reason for the failure is given in the message.) The server will drop the
 packet.
+</p></dd><dt><a name="AUTH_INVALID_STATISTICS_DATA"></a><span class="term">AUTH_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the authoritative server specified
+statistics data which is invalid for the auth specification file.
 </p></dd><dt><a name="AUTH_LOAD_TSIG"></a><span class="term">AUTH_LOAD_TSIG loading TSIG keys</span></dt><dd><p>
 This is a debug message indicating that the authoritative server
 has requested the keyring holding TSIG keys from the configuration
@@ -263,12 +266,58 @@ NOTIFY request will not be honored.
 The boss process is starting up and will now check if the message bus
 daemon is already running. If so, it will not be able to start, as it
 needs a dedicated message bus.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_AUTH"></a><span class="term">BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</span></dt><dd><p>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
-</p></dd><dt><a name="BIND10_CONFIGURATION_START_RESOLVER"></a><span class="term">BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</span></dt><dd><p>
-This message shows whether or not the resolver should be
-started according to the configuration.
+</p></dd><dt><a name="BIND10_COMPONENT_FAILED"></a><span class="term">BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</span></dt><dd><p>
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
+</p></dd><dt><a name="BIND10_COMPONENT_RESTART"></a><span class="term">BIND10_COMPONENT_RESTART component %1 is about to restart</span></dt><dd><p>
+The named component failed previously and we will try to restart it to provide
+as flawless a service as possible, but what happened should be investigated,
+as it could happen again.
+</p></dd><dt><a name="BIND10_COMPONENT_START"></a><span class="term">BIND10_COMPONENT_START component %1 is starting</span></dt><dd><p>
+The named component is about to be started by the boss process.
+</p></dd><dt><a name="BIND10_COMPONENT_START_EXCEPTION"></a><span class="term">BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</span></dt><dd><p>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further action
+will be taken on it.
+</p></dd><dt><a name="BIND10_COMPONENT_STOP"></a><span class="term">BIND10_COMPONENT_STOP component %1 is being stopped</span></dt><dd><p>
+A component is about to be asked to stop willingly by the boss.
+</p></dd><dt><a name="BIND10_COMPONENT_UNSATISFIED"></a><span class="term">BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</span></dt><dd><p>
+A component failed for some reason (see previous messages). It is either a core
+component or a needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_BUILD"></a><span class="term">BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</span></dt><dd><p>
+A debug message. This indicates that the configurator is building a plan
+for how to change the configuration from the older one to the newer one. This
+does no real work yet; it just plans what needs to be done.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_PLAN_INTERRUPTED"></a><span class="term">BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</span></dt><dd><p>
+There was an exception during some planned task. The plan will not continue and
+only some tasks of the plan were completed. The rest is aborted. The exception
+will be propagated.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RECONFIGURE"></a><span class="term">BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</span></dt><dd><p>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_RUN"></a><span class="term">BIND10_CONFIGURATOR_RUN running plan of %1 tasks</span></dt><dd><p>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_START"></a><span class="term">BIND10_CONFIGURATOR_START bind10 component configurator is starting up</span></dt><dd><p>
+The part that cares about starting and stopping the right component from the
+boss process is starting up. This happens only once at the startup of the
+boss process. It will start the basic set of processes now (the ones the boss
+needs to read the configuration); the rest will be started after the
+configuration is known.
+</p></dd><dt><a name="BIND10_CONFIGURATOR_STOP"></a><span class="term">BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</span></dt><dd><p>
+The part that cares about starting and stopping processes in the boss is
+shutting down. All started components will be shut down now (more precisely,
+asked to terminate on their own; if they fail to comply, other parts of
+the boss process will try to force them).
+</p></dd><dt><a name="BIND10_CONFIGURATOR_TASK"></a><span class="term">BIND10_CONFIGURATOR_TASK performing task %1 on %2</span></dt><dd><p>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
+</p></dd><dt><a name="BIND10_INVALID_STATISTICS_DATA"></a><span class="term">BIND10_INVALID_STATISTICS_DATA invalid specification of statistics data specified</span></dt><dd><p>
+An error was encountered when the boss module specified
+statistics data which is invalid for the boss specification file.
 </p></dd><dt><a name="BIND10_INVALID_USER"></a><span class="term">BIND10_INVALID_USER invalid user: %1</span></dt><dd><p>
 The boss process was started with the -u option, to drop root privileges
 and continue running as the specified user, but the user is unknown.
@@ -284,24 +333,14 @@ There already appears to be a message bus daemon running. Either an
 old process was not shut down correctly, and needs to be killed, or
 another instance of BIND10, with the same msgq domain socket, is
 running, which needs to be stopped.
-</p></dd><dt><a name="BIND10_MSGQ_DAEMON_ENDED"></a><span class="term">BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</span></dt><dd><p>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
 </p></dd><dt><a name="BIND10_MSGQ_DISAPPEARED"></a><span class="term">BIND10_MSGQ_DISAPPEARED msgq channel disappeared</span></dt><dd><p>
 While listening on the message bus channel for messages, it suddenly
 disappeared. The msgq daemon may have died. This might lead to an
 inconsistent state of the system, and BIND 10 will now shut down.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_NO_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</span></dt><dd><p>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</p></dd><dt><a name="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS"></a><span class="term">BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</span></dt><dd><p>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+</p></dd><dt><a name="BIND10_PROCESS_ENDED"></a><span class="term">BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</span></dt><dd><p>
+This indicates that a previously started process terminated. The process id
+and the component owning the process are indicated, as well as the exit code.
+This doesn't distinguish whether the process was supposed to terminate or not.
 </p></dd><dt><a name="BIND10_READING_BOSS_CONFIGURATION"></a><span class="term">BIND10_READING_BOSS_CONFIGURATION reading boss configuration</span></dt><dd><p>
 The boss process is starting up, and will now process the initial
 configuration, as received from the configuration manager.
@@ -327,6 +366,8 @@ so BIND 10 will now shut down. The specific error is printed.
 The boss module is sending a SIGKILL signal to the given process.
 </p></dd><dt><a name="BIND10_SEND_SIGTERM"></a><span class="term">BIND10_SEND_SIGTERM sending SIGTERM to %1 (PID %2)</span></dt><dd><p>
 The boss module is sending a SIGTERM signal to the given process.
+</p></dd><dt><a name="BIND10_SETUID"></a><span class="term">BIND10_SETUID setting UID to %1</span></dt><dd><p>
+The boss switches the user it runs as to the given UID.
 </p></dd><dt><a name="BIND10_SHUTDOWN"></a><span class="term">BIND10_SHUTDOWN stopping the server</span></dt><dd><p>
 The boss process received a command or signal telling it to shut down.
 It will send a shutdown command to each process. The processes that do
@@ -341,10 +382,6 @@ which failed is unknown (not one of 'S' for socket or 'B' for bind).
 </p></dd><dt><a name="BIND10_SOCKCREATOR_BAD_RESPONSE"></a><span class="term">BIND10_SOCKCREATOR_BAD_RESPONSE unknown response for socket request: %1</span></dt><dd><p>
 The boss requested a socket from the creator, but the answer is unknown. This
 looks like a programmer error.
-</p></dd><dt><a name="BIND10_SOCKCREATOR_CRASHED"></a><span class="term">BIND10_SOCKCREATOR_CRASHED the socket creator crashed</span></dt><dd><p>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
 </p></dd><dt><a name="BIND10_SOCKCREATOR_EOF"></a><span class="term">BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</span></dt><dd><p>
 There should be more data from the socket creator, but it closed the socket.
 It probably crashed.
@@ -368,12 +405,18 @@ The socket creator failed to create the requested socket. It failed on the
 indicated OS API function with given error.
 </p></dd><dt><a name="BIND10_SOCKET_GET"></a><span class="term">BIND10_SOCKET_GET requesting socket [%1]:%2 of type %3 from the creator</span></dt><dd><p>
 The boss forwards a request for a socket to the socket creator.
+</p></dd><dt><a name="BIND10_STARTED_CC"></a><span class="term">BIND10_STARTED_CC started configuration/command session</span></dt><dd><p>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
 </p></dd><dt><a name="BIND10_STARTED_PROCESS"></a><span class="term">BIND10_STARTED_PROCESS started %1</span></dt><dd><p>
 The given process has successfully been started.
 </p></dd><dt><a name="BIND10_STARTED_PROCESS_PID"></a><span class="term">BIND10_STARTED_PROCESS_PID started %1 (PID %2)</span></dt><dd><p>
 The given process has successfully been started, and has the given PID.
 </p></dd><dt><a name="BIND10_STARTING"></a><span class="term">BIND10_STARTING starting BIND10: %1</span></dt><dd><p>
 Informational message on startup that shows the full version.
+</p></dd><dt><a name="BIND10_STARTING_CC"></a><span class="term">BIND10_STARTING_CC starting configuration/command session</span></dt><dd><p>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
 </p></dd><dt><a name="BIND10_STARTING_PROCESS"></a><span class="term">BIND10_STARTING_PROCESS starting process %1</span></dt><dd><p>
 The boss module is starting the given process.
 </p></dd><dt><a name="BIND10_STARTING_PROCESS_PORT"></a><span class="term">BIND10_STARTING_PROCESS_PORT starting process %1 (to listen on port %2)</span></dt><dd><p>
@@ -387,8 +430,24 @@ All modules have been successfully started, and BIND 10 is now running.
 </p></dd><dt><a name="BIND10_STARTUP_ERROR"></a><span class="term">BIND10_STARTUP_ERROR error during startup: %1</span></dt><dd><p>
 There was a fatal error when BIND10 was trying to start. The error is
 shown, and BIND10 will now shut down.
-</p></dd><dt><a name="BIND10_START_AS_NON_ROOT"></a><span class="term">BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</span></dt><dd><p>
-The given module is being started or restarted without root privileges.
+</p></dd><dt><a name="BIND10_STARTUP_UNEXPECTED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+</p></dd><dt><a name="BIND10_STARTUP_UNRECOGNISED_MESSAGE"></a><span class="term">BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</span></dt><dd><p>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_AUTH"></a><span class="term">BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</span></dt><dd><p>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</p></dd><dt><a name="BIND10_START_AS_NON_ROOT_RESOLVER"></a><span class="term">BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</span></dt><dd><p>
+The resolver is being started or restarted without root privileges.
 If the module needs these privileges, it may have problems starting.
 Note that this issue should be resolved by the pending 'socket-creator'
 process; once that has been implemented, modules should not need root
@@ -399,6 +458,15 @@ the message channel.
 </p></dd><dt><a name="BIND10_UNKNOWN_CHILD_PROCESS_ENDED"></a><span class="term">BIND10_UNKNOWN_CHILD_PROCESS_ENDED unknown child pid %1 exited</span></dt><dd><p>
 An unknown child process has exited. The PID is printed, but no further
 action will be taken by the boss process.
+</p></dd><dt><a name="BIND10_WAIT_CFGMGR"></a><span class="term">BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</span></dt><dd><p>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
 </p></dd><dt><a name="CACHE_ENTRY_MISSING_RRSET"></a><span class="term">CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</span></dt><dd><p>
 The cache tried to generate the complete answer message. It knows the structure
 of the message, but some of the RRsets to be put there are not in cache (they
@@ -487,7 +555,7 @@ Debug message. The RRset cache to hold at most this many RRsets for the given
 class is being created.
 </p></dd><dt><a name="CACHE_RRSET_LOOKUP"></a><span class="term">CACHE_RRSET_LOOKUP looking up %1/%2/%3 in RRset cache</span></dt><dd><p>
 Debug message. The resolver is trying to look up data in the RRset cache.
-</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3</span></dt><dd><p>
+</p></dd><dt><a name="CACHE_RRSET_NOT_FOUND"></a><span class="term">CACHE_RRSET_NOT_FOUND no RRset found for %1/%2/%3 in cache</span></dt><dd><p>
 Debug message which can follow CACHE_RRSET_LOOKUP. This means the data is not
 in the cache.
 </p></dd><dt><a name="CACHE_RRSET_REMOVE_OLD"></a><span class="term">CACHE_RRSET_REMOVE_OLD removing old RRset for %1/%2/%3 to make space for new one</span></dt><dd><p>
@@ -642,6 +710,8 @@ The user was denied because the SSL connection could not successfully
 be set up. The specific error is given in the log message. Possible
 causes may be that the ssl request itself was bad, or the local key or
 certificate file could not be read.
+</p></dd><dt><a name="CMDCTL_STARTED"></a><span class="term">CMDCTL_STARTED cmdctl is listening for connections on %1:%2</span></dt><dd><p>
+The cmdctl daemon has started and is now listening for connections.
 </p></dd><dt><a name="CMDCTL_STOPPED_BY_KEYBOARD"></a><span class="term">CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
 There was a keyboard interrupt signal to stop the cmdctl daemon. The
 daemon will now shut down.
@@ -756,28 +826,18 @@ Debug information. An item is being removed from the hotspot cache.
 The maximum allowed number of items of the hotspot cache is set to the given
 number. If there are too many, some of them will be dropped. The size of 0
 means no limit.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_ERROR error retrieving data from datasource %1: %2</span></dt><dd><p>
-This was an internal error while reading data from a datasource. This can either
-mean the specific data source implementation is not behaving correctly, or the
-data it provides is invalid. The current search is aborted.
-The error message contains specific information about the error.
+</p></dd><dt><a name="DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED"></a><span class="term">DATASRC_DATABASE_COVER_NSEC_UNSUPPORTED %1 doesn't support DNSSEC when asked for NSEC data covering %2</span></dt><dd><p>
+The datasource tried to provide an NSEC proof that the named domain does not
+exist, but the database backend doesn't support DNSSEC. No proof is included
+in the answer as a result.
 </p></dd><dt><a name="DATASRC_DATABASE_FIND_RECORDS"></a><span class="term">DATASRC_DATABASE_FIND_RECORDS looking in datasource %1 for record %2/%3</span></dt><dd><p>
 Debug information. The database data source is looking up records with the given
 name and type in the database.
 </p></dd><dt><a name="DATASRC_DATABASE_FIND_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_FIND_TTL_MISMATCH TTL values differ in %1 for elements of %2/%3/%4, setting to %5</span></dt><dd><p>
 The datasource backend provided resource records for the given RRset with
-different TTL values. The TTL of the RRSET is set to the lowest value, which
-is printed in the log message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ERROR uncaught general error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught general exception while reading data from a datasource.
-This most likely points to a logic error in the code, and can be considered a
-bug. The current search is aborted. Specific information about the exception is
-printed in this error message.
-</p></dd><dt><a name="DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR"></a><span class="term">DATASRC_DATABASE_FIND_UNCAUGHT_ISC_ERROR uncaught error retrieving data from datasource %1: %2</span></dt><dd><p>
-There was an uncaught ISC exception while reading data from a datasource. This
-most likely points to a logic error in the code, and can be considered a bug.
-The current search is aborted. Specific information about the exception is
-printed in this error message.
+different TTL values. This isn't allowed on the wire and is considered
+an error, so we set it to the lowest value we found (but we don't modify the
+database). The data in the database should be checked and fixed.
 </p></dd><dt><a name="DATASRC_DATABASE_FOUND_DELEGATION"></a><span class="term">DATASRC_DATABASE_FOUND_DELEGATION Found delegation at %2 in %1</span></dt><dd><p>
 When searching for a domain, the program met a delegation to a different zone
 at the given domain name. It will return that one instead.
@@ -789,6 +849,10 @@ It will return the NS record instead.
 When searching for a domain, the program met a DNAME redirection to a different
 place in the domain space at the given domain name. It will return that one
 instead.
+</p></dd><dt><a name="DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL"></a><span class="term">DATASRC_DATABASE_FOUND_EMPTY_NONTERMINAL empty non-terminal %2 in %1</span></dt><dd><p>
+The domain name doesn't have any RRs, so it doesn't exist in the database.
+However, it has a subdomain, so it exists in the DNS address space. So we
+return NXRRSET instead of NXDOMAIN.
 </p></dd><dt><a name="DATASRC_DATABASE_FOUND_NXDOMAIN"></a><span class="term">DATASRC_DATABASE_FOUND_NXDOMAIN search in datasource %1 resulted in NXDOMAIN for %2/%3/%4</span></dt><dd><p>
 The data returned by the database backend did not contain any data for the given
 domain name, class and type.
@@ -799,6 +863,91 @@ name and class, but not for the given type.
 The data returned by the database backend contained data for the given domain
 name, and it either matches the type or has a relevant type. The RRset that is
 returned is printed.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE"></a><span class="term">DATASRC_DATABASE_ITERATE iterating zone %1</span></dt><dd><p>
+The program is reading the whole zone, i.e. not searching for data, but going
+through each of the RRsets there.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_END"></a><span class="term">DATASRC_DATABASE_ITERATE_END iterating zone finished</span></dt><dd><p>
+While iterating through the zone, the program reached end of the data.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_NEXT"></a><span class="term">DATASRC_DATABASE_ITERATE_NEXT next RRset in zone is %1/%2</span></dt><dd><p>
+While iterating through the zone, the program extracted the next RRset from it.
+The name and RRtype of the RRset are indicated in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_ITERATE_TTL_MISMATCH"></a><span class="term">DATASRC_DATABASE_ITERATE_TTL_MISMATCH TTL values differ for RRs of %1/%2/%3, setting to %4</span></dt><dd><p>
+While iterating through the zone, the time to live values for RRs of the given
+RRset were found to be different. This isn't allowed on the wire and is
+considered an error, so we set it to the lowest value we found (but we don't
+modify the database). The data in the database should be checked and fixed.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_END"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_NEXT"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</span></dt><dd><p>
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADER_START"></a><span class="term">DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</span></dt><dd><p>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</p></dd><dt><a name="DATASRC_DATABASE_JOURNALREADR_BADDATA"></a><span class="term">DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</span></dt><dd><p>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_COMMIT"></a><span class="term">DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_CREATED"></a><span class="term">DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_DESTROYED"></a><span class="term">DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3</span></dt><dd><p>
+Debug information.  A zone updater object is destroyed after making updates
+to the shown zone on the shown backend database, either successfully or
+after a failure.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACK"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACK zone updates roll-backed for '%1/%2' on %3</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but it may also be a bug in the application that forgot to commit
+the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_UPDATER_ROLLBACKFAIL"></a><span class="term">DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4</span></dt><dd><p>
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to rollback incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation.  In either
+case, if this message is logged the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name as well as the error message thrown from the
+database module are shown in the log message.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD"></a><span class="term">DATASRC_DATABASE_WILDCARD constructing RRset %3 from wildcard %2 in %1</span></dt><dd><p>
+The database doesn't contain a directly matching domain, but it does contain
+a matching wildcard, which is being used to synthesize the answer.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_NS"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_NS canceled wildcard match on %2 because %3 contains NS in %1</span></dt><dd><p>
+The database was queried to provide glue data and it didn't find a direct
+match.  It could synthesize the data from the given wildcard, but matching
+wildcards is forbidden under a zone cut, and a zone cut was found.  Therefore
+the delegation will be returned instead.
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_CANCEL_SUB"></a><span class="term">DATASRC_DATABASE_WILDCARD_CANCEL_SUB wildcard %2 can't be used to construct %3 because %4 exists in %1</span></dt><dd><p>
+The answer could be constructed using the wildcard, but the given subdomain
+exists, so this name acts like an empty non-terminal (in fact, from the
+protocol point of view, it is an empty non-terminal, but the code discovers
+it differently).
+</p></dd><dt><a name="DATASRC_DATABASE_WILDCARD_EMPTY"></a><span class="term">DATASRC_DATABASE_WILDCARD_EMPTY implicit wildcard %2 used to construct %3 in %1</span></dt><dd><p>
+The given wildcard exists implicitly in the domain space as an empty
+non-terminal (e.g. there is something like subdomain.*.example.org, so
+*.example.org exists implicitly but is empty).  This will produce NXRRSET,
+because the constructed domain is empty, just like the wildcard.
 </p></dd><dt><a name="DATASRC_DO_QUERY"></a><span class="term">DATASRC_DO_QUERY handling query for '%1/%2'</span></dt><dd><p>
 A debug message indicating that a query for the given name and RR type is being
 processed.
@@ -1138,6 +1287,19 @@ data source.
 </p></dd><dt><a name="DATASRC_UNEXPECTED_QUERY_STATE"></a><span class="term">DATASRC_UNEXPECTED_QUERY_STATE unexpected query state</span></dt><dd><p>
 This indicates a programming error. An internal task of unknown type was
 generated.
+</p></dd><dt><a name="LIBXFRIN_DIFFERENT_TTL"></a><span class="term">LIBXFRIN_DIFFERENT_TTL multiple data with different TTLs (%1, %2) on %3/%4. Adjusting %2 -> %1.</span></dt><dd><p>
+The xfrin module received an update containing multiple rdata changes for the
+same RRset, but the TTLs of these records don't match each other.  As they
+are combined, the later TTL is overwritten with the earlier one in the sequence.
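For illustration, the TTL rule just described fits in a few lines: when several rdata for the same name and type arrive with different TTLs, the first TTL seen wins and later ones are adjusted to it. This is a hedged sketch using plain tuples rather than the real RRset classes.

    # Illustrative only: reconcile differing TTLs for the same (name, type),
    # keeping the first TTL seen, as LIBXFRIN_DIFFERENT_TTL describes.

    def combine_rrs(rrs):
        """rrs is a list of (name, rrtype, ttl, rdata) tuples."""
        combined = {}
        for name, rrtype, ttl, rdata in rrs:
            key = (name, rrtype)
            if key not in combined:
                combined[key] = (ttl, [rdata])
                continue
            kept_ttl, rdatas = combined[key]
            if ttl != kept_ttl:
                # the later TTL is adjusted to the earlier one
                print("DIFFERENT_TTL (%d, %d) on %s/%s, adjusting %d -> %d"
                      % (kept_ttl, ttl, name, rrtype, ttl, kept_ttl))
            rdatas.append(rdata)
        return combined

    print(combine_rrs([("a.example.", "A", 3600, "192.0.2.1"),
                       ("a.example.", "A", 1800, "192.0.2.2")]))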
+</p></dd><dt><a name="LIBXFRIN_NO_JOURNAL"></a><span class="term">LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</span></dt><dd><p>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
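The fallback described here happens when the object wrapping the updates is created: journaling is requested, the data source reports that it cannot provide it while still allowing updates, and the object continues with journaling disabled after logging this warning. A rough sketch under those assumptions follows; the class and data source interface below are hypothetical.

    # Hypothetical sketch: request journaling when creating the updater and
    # fall back to non-journaled updates if the data source can't journal.

    class JournalingNotSupported(Exception):
        pass

    class Diff:
        def __init__(self, data_source, zone, journaling=True):
            self.zone = zone
            try:
                self.updater = data_source.get_updater(zone, journaling)
                self.journaling = journaling
            except JournalingNotSupported:
                # updates are still possible, just without a journal
                print("LIBXFRIN_NO_JOURNAL disabled journaling for updates "
                      "to %s" % zone)
                self.updater = data_source.get_updater(zone, False)
                self.journaling = False

    class ToyDataSource:
        """Toy data source that supports updates but not journaling."""
        def get_updater(self, zone, journaling):
            if journaling:
                raise JournalingNotSupported()
            return object()

    diff = Diff(ToyDataSource(), "example.org")
    print("journaling enabled:", diff.journaling)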
 </p></dd><dt><a name="LOGIMPL_ABOVE_MAX_DEBUG"></a><span class="term">LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</span></dt><dd><p>
 A message from the interface to the underlying logger implementation reporting
 that the debug level (as set by an internally-created string DEBUGn, where n
@@ -1259,6 +1421,16 @@ Within a message file, a line starting with a dollar symbol was found
 </p></dd><dt><a name="LOG_WRITE_ERROR"></a><span class="term">LOG_WRITE_ERROR error writing to %1: %2</span></dt><dd><p>
 The specified error was encountered by the message compiler when writing
 to the named output file.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ACCESS_FAILURE"></a><span class="term">NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</span></dt><dd><p>
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message.  This can be either a
+configuration error or an installation setup failure.
+</p></dd><dt><a name="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND"></a><span class="term">NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</span></dt><dd><p>
+notify_out attempted to get slave information for a zone but the zone
+was not found in the expected data source.  This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this process.  It therefore indicates a critical inconsistency in the
+data source or a software bug.
 </p></dd><dt><a name="NOTIFY_OUT_INVALID_ADDRESS"></a><span class="term">NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</span></dt><dd><p>
 The notify_out library tried to send a notify message to the given
 address, but it appears to be an invalid address. The configuration
@@ -1315,6 +1487,13 @@ provide more information.
 The notify message to the given address (noted as address#port) has
 timed out, and the message will be resent until the max retry limit
 is reached.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_BAD_SOA"></a><span class="term">NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  No notify message will
+be sent for such a zone.
+</p></dd><dt><a name="NOTIFY_OUT_ZONE_NO_NS"></a><span class="term">NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</span></dt><dd><p>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  No notify message will be sent for such a zone.
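Both warnings come from the same sanity check that notify_out performs before sending anything: the zone must have exactly one SOA RR and at least one NS RR. A simplified, hypothetical version of that check:

    # Simplified, hypothetical pre-notify sanity check; the real notify_out
    # works against a data source, not the plain dicts used here.

    def check_zone_for_notify(zone_name, rrsets):
        """rrsets maps an RR type ('SOA', 'NS', ...) to a list of records."""
        if len(rrsets.get("SOA", [])) != 1:
            print("ZONE_BAD_SOA: Zone %s is invalid in terms of SOA" % zone_name)
            return False
        if not rrsets.get("NS"):
            print("ZONE_NO_NS: Zone %s doesn't have NS RR" % zone_name)
            return False
        return True   # OK to notify this zone's slaves

    print(check_zone_for_notify("example.org",
                                {"SOA": ["ns1. admin. 1 7200 3600 2419200 3600"],
                                 "NS": ["ns1.example.org."]}))
    print(check_zone_for_notify("broken.example", {"SOA": []}))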
 </p></dd><dt><a name="NSAS_FIND_NS_ADDRESS"></a><span class="term">NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</span></dt><dd><p>
 A debug message issued when the NSAS (nameserver address store - part
 of the resolver) is making a callback into the resolver to retrieve the
@@ -1732,6 +1911,11 @@ respond with 'Stats Httpd is up.' and its PID.
 An unknown command has been sent to the stats-httpd module. The
 stats-httpd module will respond with an error, and the command will
 be ignored.
+</p></dd><dt><a name="STATHTTPD_SERVER_DATAERROR"></a><span class="term">STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</span></dt><dd><p>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
 </p></dd><dt><a name="STATHTTPD_SERVER_ERROR"></a><span class="term">STATHTTPD_SERVER_ERROR HTTP server error: %1</span></dt><dd><p>
 An internal error occurred while handling an HTTP request. An HTTP 500
 response will be sent back, and the specific error is printed. This
@@ -1776,14 +1960,10 @@ control bus. A likely problem is that the message bus daemon
 </p></dd><dt><a name="STATS_RECEIVED_NEW_CONFIG"></a><span class="term">STATS_RECEIVED_NEW_CONFIG received new configuration: %1</span></dt><dd><p>
 This debug message is printed when the stats module has received a
 configuration update from the configuration manager.
-</p></dd><dt><a name="STATS_RECEIVED_REMOVE_COMMAND"></a><span class="term">STATS_RECEIVED_REMOVE_COMMAND received command to remove %1</span></dt><dd><p>
-A remove command for the given name was sent to the stats module, and
-the given statistics value will now be removed. It will not appear in
-statistics reports until it appears in a statistics update from a
-module again.
-</p></dd><dt><a name="STATS_RECEIVED_RESET_COMMAND"></a><span class="term">STATS_RECEIVED_RESET_COMMAND received command to reset all statistics</span></dt><dd><p>
-The stats module received a command to clear all collected statistics.
-The data is cleared until it receives an update from the modules again.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_ALL_COMMAND received command to show all statistics schema</span></dt><dd><p>
+The stats module received a command to show all statistics schemas of all modules.
+</p></dd><dt><a name="STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND"></a><span class="term">STATS_RECEIVED_SHOWSCHEMA_NAME_COMMAND received command to show statistics schema for %1</span></dt><dd><p>
+The stats module received a command to show the statistics schema of the specified module.
 </p></dd><dt><a name="STATS_RECEIVED_SHOW_ALL_COMMAND"></a><span class="term">STATS_RECEIVED_SHOW_ALL_COMMAND received command to show all statistics</span></dt><dd><p>
 The stats module received a command to show all statistics that it has
 collected.
@@ -1801,6 +1981,11 @@ will respond with an error and the command will be ignored.
 </p></dd><dt><a name="STATS_SEND_REQUEST_BOSS"></a><span class="term">STATS_SEND_REQUEST_BOSS requesting boss to send statistics</span></dt><dd><p>
 This debug message is printed when a request is sent to the boss module
 to send its data to the stats module.
+</p></dd><dt><a name="STATS_STARTING"></a><span class="term">STATS_STARTING starting</span></dt><dd><p>
+The stats module is now starting.
+</p></dd><dt><a name="STATS_START_ERROR"></a><span class="term">STATS_START_ERROR stats module error: %1</span></dt><dd><p>
+An internal error occurred while starting the stats module. The stats
+module will now shut down.
 </p></dd><dt><a name="STATS_STOPPED_BY_KEYBOARD"></a><span class="term">STATS_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</span></dt><dd><p>
 There was a keyboard interrupt signal to stop the stats module. The
 daemon will now shut down.
@@ -1812,19 +1997,23 @@ from a different version of BIND 10 than the stats module itself.
 Please check your installation.
 </p></dd><dt><a name="XFRIN_AXFR_DATABASE_FAILURE"></a><span class="term">XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
 The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_INTERNAL_FAILURE"></a><span class="term">XFRIN_AXFR_INTERNAL_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to an internal
-problem in the bind10 python wrapper library.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_AXFR_TRANSFER_FAILURE AXFR transfer of zone %1 failed: %2</span></dt><dd><p>
-The AXFR transfer for the given zone has failed due to a protocol error.
-The error is shown in the log message.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_STARTED"></a><span class="term">XFRIN_AXFR_TRANSFER_STARTED AXFR transfer of zone %1 started</span></dt><dd><p>
-A connection to the master server has been made, the serial value in
-the SOA record has been checked, and a zone transfer has been started.
-</p></dd><dt><a name="XFRIN_AXFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_AXFR_TRANSFER_SUCCESS AXFR transfer of zone %1 succeeded</span></dt><dd><p>
-The AXFR transfer of the given zone was successfully completed.
+The error is shown in the log message.  Note: due to the code structure
+this can only happen for AXFR.
+</p></dd><dt><a name="XFRIN_AXFR_INCONSISTENT_SOA"></a><span class="term">XFRIN_AXFR_INCONSISTENT_SOA AXFR SOAs are inconsistent for %1: %2 expected, %3 received</span></dt><dd><p>
+The serial fields of the first and last SOAs of AXFR (including AXFR-style
+IXFR) are not the same.  According to RFC 5936 these two SOAs must be the
+"same" (not only for the serial), but it is still not clear what the
+receiver should do if this condition does not hold.  There was a discussion
+about this at the IETF dnsext wg:
+http://www.ietf.org/mail-archive/web/dnsext/current/msg07908.html
+and the general feeling seems to be that it would be better to reject the
+transfer if a mismatch is detected.  On the other hand, also as noted
+in that email thread, neither BIND 9 nor NSD performs any comparison
+on the SOAs.  For now, we only check the serials (ignoring other fields)
+and only leave a warning log message when a mismatch is found.  If it
+turns out to happen with a real-world primary server implementation
+that actually feeds broken data (e.g. mixed versions of the zone),
+we can consider a stricter action.
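In other words, only the serial fields of the first and last SOA of the transfer are compared, and a mismatch currently produces nothing stronger than this warning. A minimal sketch of that check (illustrative only, not the xfrin code):

    # Illustrative check of the first vs. last SOA serial of an AXFR
    # (or AXFR-style IXFR) stream; a mismatch only produces a warning.

    def check_axfr_soa_consistency(zone, first_serial, last_serial):
        if first_serial != last_serial:
            print("XFRIN_AXFR_INCONSISTENT_SOA for %s: %d expected, %d received"
                  % (zone, first_serial, last_serial))
        # the transfer is accepted either way for now

    check_axfr_soa_consistency("example.org", 2011120901, 2011120902)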
 </p></dd><dt><a name="XFRIN_BAD_MASTER_ADDR_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_ADDR_FORMAT bad format for master address: %1</span></dt><dd><p>
 The given master address is not a valid IP address.
 </p></dd><dt><a name="XFRIN_BAD_MASTER_PORT_FORMAT"></a><span class="term">XFRIN_BAD_MASTER_PORT_FORMAT bad format for master port: %1</span></dt><dd><p>
@@ -1843,6 +2032,17 @@ error is given in the log message.
 </p></dd><dt><a name="XFRIN_CONNECT_MASTER"></a><span class="term">XFRIN_CONNECT_MASTER error connecting to master at %1: %2</span></dt><dd><p>
 There was an error opening a connection to the master. The error is
 shown in the log message.
+</p></dd><dt><a name="XFRIN_GOT_INCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1</span></dt><dd><p>
+During an attempt at IXFR processing, the beginning SOA of the first
+difference (following the initial SOA that specified the final SOA for
+all the differences) was found.  This means the xfrin connection tried
+IXFR and actually got a response with incremental updates.
+</p></dd><dt><a name="XFRIN_GOT_NONINCREMENTAL_RESP"></a><span class="term">XFRIN_GOT_NONINCREMENTAL_RESP got nonincremental response for %1</span></dt><dd><p>
+A non-incremental transfer was detected at the "first data" of a transfer,
+which is the RR following the initial SOA.  A non-incremental transfer is
+either AXFR or AXFR-style IXFR.  In the latter case, it means that in a
+response to an IXFR query the first data is not an SOA or its SOA serial
+is not equal to the requested SOA serial.
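Taken together, the two messages describe the decision xfrin makes when it inspects the "first data" RR, i.e. the record that follows the initial SOA of the response. Roughly, and using a toy record representation rather than the real message parser:

    # Toy version of the decision made at the "first data" RR of a response;
    # the record format and names are illustrative only.

    def classify_response(requested_serial, first_data):
        """first_data is (rrtype, soa_serial_or_None) for the RR that
        follows the initial SOA of the response."""
        rrtype, serial = first_data
        if rrtype == "SOA" and serial == requested_serial:
            # beginning SOA of the first difference sequence: true IXFR
            return "incremental"
        # anything else: AXFR, or AXFR-style IXFR
        return "nonincremental"

    print(classify_response(10, ("SOA", 10)))   # incremental
    print(classify_response(10, ("NS", None)))  # nonincremental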
 </p></dd><dt><a name="XFRIN_IMPORT_DNS"></a><span class="term">XFRIN_IMPORT_DNS error importing python DNS module: %1</span></dt><dd><p>
 There was an error importing the python DNS module pydnspp. The most
 likely cause is a PYTHONPATH problem.
@@ -1853,6 +2053,11 @@ was killed.
 </p></dd><dt><a name="XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER"></a><span class="term">XFRIN_MSGQ_SEND_ERROR_ZONE_MANAGER error while contacting %1</span></dt><dd><p>
 There was a problem sending a message to the zone manager. This most
 likely means that the msgq daemon has quit or was killed.
+</p></dd><dt><a name="XFRIN_NOTIFY_UNKNOWN_MASTER"></a><span class="term">XFRIN_NOTIFY_UNKNOWN_MASTER got notification to retransfer zone %1 from %2, expected %3</span></dt><dd><p>
+The system received a notify for the given zone, but the address it came
+from does not match the master address in the Xfrin configuration. The notify
+is ignored. This may indicate that the configuration for the master is wrong,
+that a wrong machine is sending notifies, or that fake notifies are being sent.
 </p></dd><dt><a name="XFRIN_RETRANSFER_UNKNOWN_ZONE"></a><span class="term">XFRIN_RETRANSFER_UNKNOWN_ZONE got notification to retransfer unknown zone %1</span></dt><dd><p>
 There was an internal command to retransfer the given zone, but the
 zone is not known to the system. This may indicate that the configuration
@@ -1866,24 +2071,37 @@ daemon will now shut down.
 </p></dd><dt><a name="XFRIN_UNKNOWN_ERROR"></a><span class="term">XFRIN_UNKNOWN_ERROR unknown error: %1</span></dt><dd><p>
 An uncaught exception was raised while running the xfrin daemon. The
 exception message is printed in the log message.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_DONE"></a><span class="term">XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</span></dt><dd><p>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_ERROR"></a><span class="term">XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</span></dt><dd><p>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_FAILED"></a><span class="term">XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</span></dt><dd><p>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</p></dd><dt><a name="XFROUT_AXFR_TRANSFER_STARTED"></a><span class="term">XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</span></dt><dd><p>
-A transfer out of the given zone has started.
+</p></dd><dt><a name="XFRIN_XFR_OTHER_FAILURE"></a><span class="term">XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a problem outside
+of the xfrin module.  Possible reasons are a broken DNS message or a
+failure in the database connection.  The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_PROCESS_FAILURE"></a><span class="term">XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</span></dt><dd><p>
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general, these errors are not expected and
+indicate an installation error or a program bug.  The session handler
+thread tries to clean up all intermediate resources even on these
+errors, but the cleanup may be incomplete.  So, if this log message
+appears repeatedly, system resource consumption should be checked, and
+you may even want to disable the corresponding transfers.  You may also
+want to file a bug report if this message appears that often.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FAILURE"></a><span class="term">XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</span></dt><dd><p>
+The XFR transfer for the given zone has failed due to a protocol error.
+The error is shown in the log message.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_FALLBACK"></a><span class="term">XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</span></dt><dd><p>
+The IXFR transfer of the given zone failed. This can happen for many reasons:
+the remote server doesn't support IXFR, we don't have the SOA record (or the
+zone at all), we are out of sync, etc. In many of these situations AXFR could
+still work, so it is tried in case it helps.
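The fallback itself is simply a retry of the whole transfer with a different request type. A schematic version, where do_transfer() is a placeholder for the real transfer routine:

    # Schematic IXFR -> AXFR fallback; do_transfer() is a placeholder that
    # simply fails for IXFR in this toy example.

    class XfrFailure(Exception):
        pass

    def do_transfer(zone, rrtype):
        if rrtype == "IXFR":
            raise XfrFailure("remote server does not support IXFR")
        return "AXFR of %s completed" % zone

    def transfer_with_fallback(zone):
        try:
            return do_transfer(zone, "IXFR")
        except XfrFailure:
            print("XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to "
                  "AXFR for %s" % zone)
            return do_transfer(zone, "AXFR")

    print(transfer_with_fallback("example.org"))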
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_STARTED"></a><span class="term">XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</span></dt><dd><p>
+A connection to the master server has been made, the serial value in
+the SOA record has been checked, and a zone transfer has been started.
+</p></dd><dt><a name="XFRIN_XFR_TRANSFER_SUCCESS"></a><span class="term">XFRIN_XFR_TRANSFER_SUCCESS %1 transfer of zone %2 succeeded</span></dt><dd><p>
+The XFR transfer of the given zone was successfully completed.
 </p></dd><dt><a name="XFROUT_BAD_TSIG_KEY_STRING"></a><span class="term">XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</span></dt><dd><p>
 The TSIG key string as read from the configuration does not represent
 a valid TSIG key.
@@ -1894,6 +2112,9 @@ most likely cause is that the msgq daemon is not running.
 There was a problem reading a response from another module over the
 command and control channel. The most likely cause is that the
 configuration manager b10-cfgmgr is not running.
+</p></dd><dt><a name="XFROUT_CONFIG_ERROR"></a><span class="term">XFROUT_CONFIG_ERROR error found in configuration data: %1</span></dt><dd><p>
+The xfrout process encountered an error when installing the configuration at
+startup time.  Details of the error are included in the log message.
 </p></dd><dt><a name="XFROUT_FETCH_REQUEST_ERROR"></a><span class="term">XFROUT_FETCH_REQUEST_ERROR socket error while fetching a request from the auth daemon</span></dt><dd><p>
 There was a socket error while contacting the b10-auth daemon to
 fetch a transfer request. The auth daemon may have shutdown.
@@ -1908,6 +2129,45 @@ by xfrout could not be found. This suggests that either some libraries
 are missing on the system, or the PYTHONPATH variable is not correct.
 The specific place where this library needs to be depends on your
 system and your specific installation.
+</p></dd><dt><a name="XFROUT_IXFR_MULTIPLE_SOA"></a><span class="term">XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</span></dt><dd><p>
+An IXFR request was received with more than one SOA RR in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_JOURNAL_SUPPORT"></a><span class="term">XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_SOA"></a><span class="term">XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</span></dt><dd><p>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
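The two FORMERR cases above (XFROUT_IXFR_MULTIPLE_SOA and XFROUT_IXFR_NO_SOA) are plain structural checks on the request: the authority section must carry exactly one SOA. A hypothetical version of the check, returning the response code as a string for brevity:

    # Hypothetical validation of an IXFR request's authority section; the
    # daemon requires exactly one SOA RR there, otherwise it answers FORMERR.

    def check_ixfr_authority(authority_rrtypes):
        """authority_rrtypes is the list of RR types in the request's
        authority section."""
        soa_count = authority_rrtypes.count("SOA")
        if soa_count == 0:
            return "FORMERR"   # XFROUT_IXFR_NO_SOA
        if soa_count > 1:
            return "FORMERR"   # XFROUT_IXFR_MULTIPLE_SOA
        return "NOERROR"

    print(check_ixfr_authority(["SOA"]))          # NOERROR
    print(check_ixfr_authority([]))               # FORMERR, missing SOA
    print(check_ixfr_authority(["SOA", "SOA"]))   # FORMERR, multiple SOAs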
+</p></dd><dt><a name="XFROUT_IXFR_NO_VERSION"></a><span class="term">XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</span></dt><dd><p>
+An IXFR request was received, but the requested range of differences
+was not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+</p></dd><dt><a name="XFROUT_IXFR_NO_ZONE"></a><span class="term">XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</span></dt><dd><p>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the short window between these two
+operations, but it's more likely to be a bug or a broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</p></dd><dt><a name="XFROUT_IXFR_UPTODATE"></a><span class="term">XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</span></dt><dd><p>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic.  This will soon
+be implemented.
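The "new enough" test ultimately relies on DNS serial number arithmetic (RFC 1982), which, as noted, is not yet used; currently only equality is effectively detected. For reference, a serial comparison that does follow RFC 1982 could look roughly like this; it is a generic illustration, not the xfrout code:

    # Generic RFC 1982 comparison for 32-bit DNS serials, shown only to
    # illustrate what "newer" would mean once serial arithmetic is used.

    def serial_gt(s1, s2):
        """True if serial s1 is 'greater than' s2 in RFC 1982 terms."""
        return ((s1 > s2 and s1 - s2 < 2**31) or
                (s1 < s2 and s2 - s1 > 2**31))

    def client_is_up_to_date(client_serial, server_serial):
        return (client_serial == server_serial or
                serial_gt(client_serial, server_serial))

    print(client_is_up_to_date(2011120901, 2011120901))  # True: same version
    print(client_is_up_to_date(2011120902, 2011120901))  # True: client newer
    print(client_is_up_to_date(2011120801, 2011120901))  # False: transfer needed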
+</p></dd><dt><a name="XFROUT_MODULECC_SESSION_ERROR"></a><span class="term">XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</span></dt><dd><p>
+There was a problem in the lower level module handling configuration and
+control commands.  This could happen for various reasons, but the most likely
+cause is that the configuration database contains a syntax error and xfrout
+failed to start at initialization.  A detailed error message from the module
+will also be displayed.
 </p></dd><dt><a name="XFROUT_NEW_CONFIG"></a><span class="term">XFROUT_NEW_CONFIG Update xfrout configuration</span></dt><dd><p>
 New configuration settings have been sent from the configuration
 manager. The xfrout daemon will now apply them.
@@ -1929,15 +2189,25 @@ There was an error processing a transfer request. The error is included
 in the log message, but at this point no specific information other
 than that could be given. This points to incomplete exception handling
 in the code.
-</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</span></dt><dd><p>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
-</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</span></dt><dd><p>
+</p></dd><dt><a name="XFROUT_QUERY_DROPPED"></a><span class="term">XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</span></dt><dd><p>
+The xfrout process silently dropped a request to transfer the zone to
+the given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</p></dd><dt><a name="XFROUT_QUERY_QUOTA_EXCCEEDED"></a><span class="term">XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</span></dt><dd><p>
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately answered and terminated with an RCODE of REFUSED.
+This can happen on a busy xfrout server, and you may want to increase
+this parameter; if the server is too busy due to requests from
+unexpected clients, you may want to restrict access to the legitimate
+clients with an ACL.
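The quota check is a straightforward counter comparison against the transfers_out setting, answering REFUSED once the limit is reached. A rough sketch with hypothetical names and without the inter-process counter sharing the real xfrout needs:

    # Rough sketch of the transfer quota check; the real xfrout shares the
    # counter between worker processes, which is omitted here.

    class TransferQuota:
        def __init__(self, max_transfers_out):
            self.max = max_transfers_out
            self.active = 0

        def try_start(self, client):
            if self.active >= self.max:
                print("QUERY_QUOTA_EXCCEEDED client %s: request denied "
                      "due to quota (%d)" % (client, self.max))
                return "REFUSED"
            self.active += 1
            return "NOERROR"

        def finish(self):
            self.active -= 1

    quota = TransferQuota(max_transfers_out=1)
    print(quota.try_start("192.0.2.1#53000"))  # NOERROR
    print(quota.try_start("192.0.2.2#53001"))  # REFUSED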
+</p></dd><dt><a name="XFROUT_QUERY_REJECTED"></a><span class="term">XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</span></dt><dd><p>
 The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
 </p></dd><dt><a name="XFROUT_RECEIVED_SHUTDOWN_COMMAND"></a><span class="term">XFROUT_RECEIVED_SHUTDOWN_COMMAND shutdown command received</span></dt><dd><p>
 The xfrout daemon received a shutdown command from the command channel
 and will now shut down.
@@ -1973,6 +2243,30 @@ socket needed for contacting the b10-auth daemon to pass requests
 on, but the file is in use. The most likely cause is that another
 xfrout daemon process is still running. This xfrout daemon (the one
 printing this message) will not start.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_CHECK_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</span></dt><dd><p>
+The pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is a low level error in the data
+source, but it may also be another, more general (though unlikely) error
+such as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_DONE"></a><span class="term">XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</span></dt><dd><p>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_ERROR"></a><span class="term">XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</span></dt><dd><p>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_FAILED"></a><span class="term">XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</span></dt><dd><p>
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</p></dd><dt><a name="XFROUT_XFR_TRANSFER_STARTED"></a><span class="term">XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</span></dt><dd><p>
+A transfer out of the given zone has started.
 </p></dd><dt><a name="ZONEMGR_CCSESSION_ERROR"></a><span class="term">ZONEMGR_CCSESSION_ERROR command channel session error: %1</span></dt><dd><p>
 An error was encountered on the command channel.  The message indicates
 the nature of the error.
diff --git a/doc/guide/bind10-messages.xml b/doc/guide/bind10-messages.xml
index bade381..4dc02d4 100644
--- a/doc/guide/bind10-messages.xml
+++ b/doc/guide/bind10-messages.xml
@@ -573,19 +573,117 @@ needs a dedicated message bus.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_CONFIGURATION_START_AUTH">
-<term>BIND10_CONFIGURATION_START_AUTH start authoritative server: %1</term>
+<varlistentry id="BIND10_COMPONENT_FAILED">
+<term>BIND10_COMPONENT_FAILED component %1 (pid %2) failed with %3 exit status</term>
 <listitem><para>
-This message shows whether or not the authoritative server should be
-started according to the configuration.
+The process terminated, but the bind10 boss didn't expect it to, which means
+it must have failed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_CONFIGURATION_START_RESOLVER">
-<term>BIND10_CONFIGURATION_START_RESOLVER start resolver: %1</term>
+<varlistentry id="BIND10_COMPONENT_RESTART">
+<term>BIND10_COMPONENT_RESTART component %1 is about to restart</term>
 <listitem><para>
-This message shows whether or not the resolver should be
-started according to the configuration.
+The named component failed previously and we will try to restart it to provide
+as seamless a service as possible, but what happened should be investigated,
+as it could happen again.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START">
+<term>BIND10_COMPONENT_START component %1 is starting</term>
+<listitem><para>
+The named component is about to be started by the boss process.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_START_EXCEPTION">
+<term>BIND10_COMPONENT_START_EXCEPTION component %1 failed to start: %2</term>
+<listitem><para>
+An exception (mentioned in the message) happened during the startup of the
+named component. The component is not considered started and further
+action will be taken about it.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_STOP">
+<term>BIND10_COMPONENT_STOP component %1 is being stopped</term>
+<listitem><para>
+A component is about to be asked by the boss to stop gracefully.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_COMPONENT_UNSATISFIED">
+<term>BIND10_COMPONENT_UNSATISFIED component %1 is required to run and failed</term>
+<listitem><para>
+A component failed for some reason (see previous messages). It is either a core
+component or a needed component that was just started. In any case, the system
+can't continue without it and will terminate.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_BUILD">
+<term>BIND10_CONFIGURATOR_BUILD building plan '%1' -> '%2'</term>
+<listitem><para>
+A debug message. This indicates that the configurator is building a plan for
+how to change the configuration from the old one to the new one. This does no
+real work yet; it just plans what needs to be done.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_PLAN_INTERRUPTED">
+<term>BIND10_CONFIGURATOR_PLAN_INTERRUPTED configurator plan interrupted, only %1 of %2 done</term>
+<listitem><para>
+There was an exception during some planned task. The plan will not continue;
+only some of its tasks were completed and the rest are aborted. The exception
+will be propagated.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RECONFIGURE">
+<term>BIND10_CONFIGURATOR_RECONFIGURE reconfiguring running components</term>
+<listitem><para>
+A different configuration of which components should be running is being
+installed. All components that are no longer needed will be stopped and
+newly introduced ones started. This happens at startup, when the configuration
+is read the first time, or when an operator changes configuration of the boss.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_RUN">
+<term>BIND10_CONFIGURATOR_RUN running plan of %1 tasks</term>
+<listitem><para>
+A debug message. The configurator is about to execute a plan of actions it
+computed previously.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_START">
+<term>BIND10_CONFIGURATOR_START bind10 component configurator is starting up</term>
+<listitem><para>
+The part of the boss process that takes care of starting and stopping the
+right components is starting up. This happens only once, at the startup of
+the boss process. It will start the basic set of processes now (the ones the
+boss needs to read the configuration); the rest will be started after the
+configuration is known.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_STOP">
+<term>BIND10_CONFIGURATOR_STOP bind10 component configurator is shutting down</term>
+<listitem><para>
+The part of the boss process that takes care of starting and stopping
+components is shutting down. All started components will be shut down now
+(more precisely, asked to terminate on their own; if they fail to comply,
+other parts of the boss process will try to force them).
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_CONFIGURATOR_TASK">
+<term>BIND10_CONFIGURATOR_TASK performing task %1 on %2</term>
+<listitem><para>
+A debug message. The configurator is about to perform one task of the plan it
+is currently executing on the named component.
 </para></listitem>
 </varlistentry>
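The BUILD, RUN and TASK messages describe a two-phase configurator: a plan is first computed as the difference between the old and new component configuration, and only then are the plan's tasks executed one by one. A toy version of the idea (illustrative names, far less state than the real boss configurator):

    # Toy two-phase configurator: build a start/stop plan from the old and
    # new component sets, then run it. Names are illustrative only.

    def build_plan(old_components, new_components):
        plan = [("stop", c) for c in sorted(old_components - new_components)]
        plan += [("start", c) for c in sorted(new_components - old_components)]
        return plan   # planning only, no real work yet

    def run_plan(plan):
        print("CONFIGURATOR_RUN running plan of %d tasks" % len(plan))
        for action, component in plan:
            print("CONFIGURATOR_TASK performing task %s on %s"
                  % (action, component))
            # the real configurator would start or stop the component here

    run_plan(build_plan({"b10-auth", "b10-xfrin"},
                        {"b10-auth", "b10-xfrout"}))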
 
@@ -632,14 +730,6 @@ running, which needs to be stopped.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_MSGQ_DAEMON_ENDED">
-<term>BIND10_MSGQ_DAEMON_ENDED b10-msgq process died, shutting down</term>
-<listitem><para>
-The message bus daemon has died. This is a fatal error, since it may
-leave the system in an inconsistent state. BIND10 will now shut down.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="BIND10_MSGQ_DISAPPEARED">
 <term>BIND10_MSGQ_DISAPPEARED msgq channel disappeared</term>
 <listitem><para>
@@ -649,24 +739,12 @@ inconsistent state of the system, and BIND 10 will now shut down.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_PROCESS_ENDED_NO_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_NO_EXIT_STATUS process %1 (PID %2) died: exit status not available</term>
-<listitem><para>
-The given process ended unexpectedly, but no exit status is
-available. See BIND10_PROCESS_ENDED_WITH_EXIT_STATUS for a longer
-description.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="BIND10_PROCESS_ENDED_WITH_EXIT_STATUS">
-<term>BIND10_PROCESS_ENDED_WITH_EXIT_STATUS process %1 (PID %2) terminated, exit status = %3</term>
+<varlistentry id="BIND10_PROCESS_ENDED">
+<term>BIND10_PROCESS_ENDED process %2 of %1 ended with status %3</term>
 <listitem><para>
-The given process ended unexpectedly with the given exit status.
-Depending on which module it was, it may simply be restarted, or it
-may be a problem that will cause the boss module to shut down too.
-The latter happens if it was the message bus daemon, which, if it has
-died suddenly, may leave the system in an inconsistent state. BIND10
-will also shut down now if it has been run with --brittle.
+This indicates that a previously started process has terminated. The process
+id and the component owning the process are indicated, as well as the exit
+code. This doesn't distinguish whether the process was supposed to terminate
+or not.
 </para></listitem>
 </varlistentry>
 
@@ -740,6 +818,13 @@ The boss module is sending a SIGTERM signal to the given process.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_SETUID">
+<term>BIND10_SETUID setting UID to %1</term>
+<listitem><para>
+The boss switches the user it runs as to the given UID.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="BIND10_SHUTDOWN">
 <term>BIND10_SHUTDOWN stopping the server</term>
 <listitem><para>
@@ -774,15 +859,6 @@ looks like a programmer error.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_SOCKCREATOR_CRASHED">
-<term>BIND10_SOCKCREATOR_CRASHED the socket creator crashed</term>
-<listitem><para>
-The socket creator terminated unexpectedly. It is not possible to restart it
-(because the boss already gave up root privileges), so the system is going
-to terminate.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="BIND10_SOCKCREATOR_EOF">
 <term>BIND10_SOCKCREATOR_EOF eof while expecting data from socket creator</term>
 <listitem><para>
@@ -846,6 +922,14 @@ The boss forwards a request for a socket to the socket creator.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_STARTED_CC">
+<term>BIND10_STARTED_CC started configuration/command session</term>
+<listitem><para>
+Debug message given when BIND 10 has successfully started the object that
+handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="BIND10_STARTED_PROCESS">
 <term>BIND10_STARTED_PROCESS started %1</term>
 <listitem><para>
@@ -867,6 +951,14 @@ Informational message on startup that shows the full version.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_STARTING_CC">
+<term>BIND10_STARTING_CC starting configuration/command session</term>
+<listitem><para>
+Informational message given when BIND 10 is starting the session object
+that handles configuration and commands.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="BIND10_STARTING_PROCESS">
 <term>BIND10_STARTING_PROCESS starting process %1</term>
 <listitem><para>
@@ -905,10 +997,41 @@ shown, and BIND10 will now shut down.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="BIND10_START_AS_NON_ROOT">
-<term>BIND10_START_AS_NON_ROOT starting %1 as a user, not root. This might fail.</term>
+<varlistentry id="BIND10_STARTUP_UNEXPECTED_MESSAGE">
+<term>BIND10_STARTUP_UNEXPECTED_MESSAGE unrecognised startup message %1</term>
 <listitem><para>
-The given module is being started or restarted without root privileges.
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is recognised as being of the
+correct format but is unexpected.  It may be that processes are starting
+out of sequence.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_STARTUP_UNRECOGNISED_MESSAGE">
+<term>BIND10_STARTUP_UNRECOGNISED_MESSAGE unrecognised startup message %1</term>
+<listitem><para>
+During the startup process, a number of messages are exchanged between the
+Boss process and the processes it starts.  This error is output when a
+message received by the Boss process is not recognised.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_AUTH">
+<term>BIND10_START_AS_NON_ROOT_AUTH starting b10-auth as a user, not root. This might fail.</term>
+<listitem><para>
+The authoritative server is being started or restarted without root privileges.
+If the module needs these privileges, it may have problems starting.
+Note that this issue should be resolved by the pending 'socket-creator'
+process; once that has been implemented, modules should not need root
+privileges anymore. See tickets #800 and #801 for more information.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="BIND10_START_AS_NON_ROOT_RESOLVER">
+<term>BIND10_START_AS_NON_ROOT_RESOLVER starting b10-resolver as a user, not root. This might fail.</term>
+<listitem><para>
+The resolver is being started or restarted without root privileges.
 If the module needs these privileges, it may have problems starting.
 Note that this issue should be resolved by the pending 'socket-creator'
 process; once that has been implemented, modules should not need root
@@ -932,6 +1055,20 @@ action will be taken by the boss process.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="BIND10_WAIT_CFGMGR">
+<term>BIND10_WAIT_CFGMGR waiting for configuration manager process to initialize</term>
+<listitem><para>
+The configuration manager process is so critical to operation of BIND 10
+that after starting it, the Boss module will wait for it to initialize
+itself before continuing.  This debug message is produced during the
+wait and may be output zero or more times depending on how long it takes
+the configuration manager to start up.  The total length of time Boss
+will wait for the configuration manager before reporting an error is
+set with the command line --wait switch, which has a default value of
+ten seconds.
+</para></listitem>
+</varlistentry>
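The message above comes from a simple poll-and-wait loop bounded by the --wait timeout (ten seconds by default). Schematically, with is_cfgmgr_ready() standing in for the real readiness check:

    # Schematic wait loop for the configuration manager; is_cfgmgr_ready()
    # stands in for the real readiness check.

    import time

    def wait_for_cfgmgr(is_cfgmgr_ready, wait_seconds=10, poll_interval=1):
        deadline = time.time() + wait_seconds
        while time.time() < deadline:
            if is_cfgmgr_ready():
                return True
            print("BIND10_WAIT_CFGMGR waiting for configuration manager "
                  "process to initialize")
            time.sleep(poll_interval)
        return False   # the caller reports an error after the timeout

    print(wait_for_cfgmgr(lambda: True))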
+
 <varlistentry id="CACHE_ENTRY_MISSING_RRSET">
 <term>CACHE_ENTRY_MISSING_RRSET missing RRset to generate message for %1</term>
 <listitem><para>
@@ -1535,6 +1672,13 @@ certificate file could not be read.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="CMDCTL_STARTED">
+<term>CMDCTL_STARTED cmdctl is listening for connections on %1:%2</term>
+<listitem><para>
+The cmdctl daemon has started and is now listening for connections.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="CMDCTL_STOPPED_BY_KEYBOARD">
 <term>CMDCTL_STOPPED_BY_KEYBOARD keyboard interrupt, shutting down</term>
 <listitem><para>
@@ -1909,6 +2053,50 @@ database). The data in database should be checked and fixed.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_END">
+<term>DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program (successfully)
+reaches the end of a zone's difference sequences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_NEXT">
+<term>DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5</term>
+<listitem><para>
+This is a debug message indicating that the program retrieves one
+difference from a zone's difference sequences and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADER_START">
+<term>DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5</term>
+<listitem><para>
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="DATASRC_DATABASE_JOURNALREADR_BADDATA">
+<term>DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6</term>
+<listitem><para>
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="DATASRC_DATABASE_UPDATER_COMMIT">
 <term>DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3</term>
 <listitem><para>
@@ -2890,6 +3078,20 @@ together, the later one get's overwritten to the earlier one in the sequence.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="LIBXFRIN_NO_JOURNAL">
+<term>LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2</term>
+<listitem><para>
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="LOGIMPL_ABOVE_MAX_DEBUG">
 <term>LOGIMPL_ABOVE_MAX_DEBUG debug level of %1 is too high and will be set to the maximum of %2</term>
 <listitem><para>
@@ -3126,6 +3328,26 @@ to the named output file.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="NOTIFY_OUT_DATASRC_ACCESS_FAILURE">
+<term>NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1</term>
+<listitem><para>
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message.  This can be either a
+configuration error or an installation setup failure.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND">
+<term>NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found</term>
+<listitem><para>
+notify_out attempted to get slave information for a zone but the zone
+was not found in the expected data source.  This shouldn't happen,
+because notify_out first identifies the list of available zones before
+this process.  It therefore indicates a critical inconsistency in the
+data source or a software bug.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="NOTIFY_OUT_INVALID_ADDRESS">
 <term>NOTIFY_OUT_INVALID_ADDRESS invalid address %1#%2: %3</term>
 <listitem><para>
@@ -3237,6 +3459,23 @@ is reached.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="NOTIFY_OUT_ZONE_BAD_SOA">
+<term>NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  No notify message will
+be sent for such a zone.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="NOTIFY_OUT_ZONE_NO_NS">
+<term>NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR</term>
+<listitem><para>
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  No notify message will be sent for such a zone.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="NSAS_FIND_NS_ADDRESS">
 <term>NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1</term>
 <listitem><para>
@@ -4144,6 +4383,16 @@ be ignored.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="STATHTTPD_SERVER_DATAERROR">
+<term>STATHTTPD_SERVER_DATAERROR HTTP server data error: %1</term>
+<listitem><para>
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the data
+corresponding to the requested URI is incorrect.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="STATHTTPD_SERVER_ERROR">
 <term>STATHTTPD_SERVER_ERROR HTTP server error: %1</term>
 <listitem><para>
@@ -4518,6 +4767,25 @@ in database connection.  The error is shown in the log message.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFRIN_XFR_PROCESS_FAILURE">
+<term>XFRIN_XFR_PROCESS_FAILURE %1 transfer of zone %2/%3 failed: %4</term>
+<listitem><para>
+An XFR session failed outside the main protocol handling.  This
+includes an error at the data source level at the initialization
+phase, unexpected failure in the network connection setup to the
+master server, or even more unexpected failure due to unlikely events
+such as memory allocation failure.  Details of the error are shown in
+the log message.  In general, these errors are not expected and
+indicate an installation error or a program bug.  The session handler
+thread tries to clean up all intermediate resources even on these
+errors, but the cleanup may be incomplete.  So, if this log message
+appears repeatedly, system resource consumption should be checked, and
+you may even want to disable the corresponding transfers.  You may also
+want to file a bug report if this message appears that often.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="XFRIN_XFR_TRANSFER_FAILURE">
 <term>XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3</term>
 <listitem><para>
@@ -4526,6 +4794,16 @@ The error is shown in the log message.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFRIN_XFR_TRANSFER_FALLBACK">
+<term>XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1</term>
+<listitem><para>
+The IXFR transfer of the given zone failed. This can happen for many reasons:
+the remote server doesn't support IXFR, we don't have the SOA record (or the
+zone at all), we are out of sync, etc. In many of these situations AXFR could
+still work, so it is tried in case it helps.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="XFRIN_XFR_TRANSFER_STARTED">
 <term>XFRIN_XFR_TRANSFER_STARTED %1 transfer of zone %2 started</term>
 <listitem><para>
@@ -4541,44 +4819,6 @@ The XFR transfer of the given zone was successfully completed.
 </para></listitem>
 </varlistentry>
 
-<varlistentry id="XFROUT_AXFR_TRANSFER_DONE">
-<term>XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete</term>
-<listitem><para>
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_ERROR">
-<term>XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3</term>
-<listitem><para>
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_FAILED">
-<term>XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3</term>
-<listitem><para>
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-</para></listitem>
-</varlistentry>
-
-<varlistentry id="XFROUT_AXFR_TRANSFER_STARTED">
-<term>XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started</term>
-<listitem><para>
-A transfer out of the given zone has started.
-</para></listitem>
-</varlistentry>
-
 <varlistentry id="XFROUT_BAD_TSIG_KEY_STRING">
 <term>XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1</term>
 <listitem><para>
@@ -4641,6 +4881,69 @@ system and your specific installation.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFROUT_IXFR_MULTIPLE_SOA">
+<term>XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs</term>
+<listitem><para>
+An IXFR request was received with more than one SOA RR in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_JOURNAL_SUPPORT">
+<term>XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_SOA">
+<term>XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA</term>
+<listitem><para>
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_VERSION">
+<term>XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR</term>
+<listitem><para>
+An IXFR request was received, but the requested range of differences
+was not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_NO_ZONE">
+<term>XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal</term>
+<listitem><para>
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source within the short window between these two
+operations, but it's more likely to be a bug or a broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_IXFR_UPTODATE">
+<term>XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)</term>
+<listitem><para>
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with the answer section being just one SOA of that version.
+Note: as of this writing the 'newer version' cannot be identified due to
+the lack of support for serial number arithmetic.  This will soon
+be implemented.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="XFROUT_MODULECC_SESSION_ERROR">
 <term>XFROUT_MODULECC_SESSION_ERROR error encountered by configuration/command module: %1</term>
 <listitem><para>
@@ -4699,21 +5002,36 @@ in the code.
 </varlistentry>
 
 <varlistentry id="XFROUT_QUERY_DROPPED">
-<term>XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped</term>
+<term>XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped</term>
+<listitem><para>
+The xfrout process silently dropped a request to transfer the zone to
+the given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_QUERY_QUOTA_EXCCEEDED">
+<term>XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)</term>
 <listitem><para>
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+The xfr request was rejected because the server was already handling
+the maximum number of allowed transfers, as specified in the
+transfers_out configuration parameter, which is also shown in the log
+message.  The request was immediately answered with an RCODE of
+REFUSED and then terminated.  This can happen on a busy xfrout server,
+and you may want to increase this parameter; if the server is busy due
+to requests from unexpected clients, you may instead want to restrict
+access to the legitimate clients with an ACL.
 </para></listitem>
 </varlistentry>
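
The quota behaviour described above amounts to a counter check made before a
transfer is started.  A minimal sketch with hypothetical names (this is not
the actual xfrout code):

    import threading

    class TransferQuota:
        """Counter mirroring the transfers_out limit described above."""
        def __init__(self, transfers_out):
            self._max = transfers_out
            self._active = 0
            self._lock = threading.Lock()

        def acquire(self):
            # True means the transfer may start; False means the request
            # should be answered with REFUSED (the case logged above).
            with self._lock:
                if self._active >= self._max:
                    return False
                self._active += 1
                return True

        def release(self):
            with self._lock:
                self._active -= 1

    quota = TransferQuota(transfers_out=10)
    print(quota.acquire())   # True: within the quota, the transfer may proceed
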
 
 <varlistentry id="XFROUT_QUERY_REJECTED">
-<term>XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected</term>
+<term>XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected</term>
 <listitem><para>
 The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host, as dictated by the ACLs.  The %2 placeholder represents the
+IP address and port of the peer requesting the transfer, and %3
+represents the zone name and class.
 </para></listitem>
 </varlistentry>
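
The only difference between the two entries above is the ACL action taken.  A
sketch of the dispatch, using the conventional ACCEPT/REJECT/DROP actions (the
helper names are made up for illustration):

    def apply_acl_action(action, send_refused, log_rejected, log_dropped):
        # ACCEPT: continue with the transfer.
        # REJECT: answer with an RCODE of REFUSED (XFROUT_QUERY_REJECTED).
        # DROP:   send no response at all (XFROUT_QUERY_DROPPED).
        if action == 'ACCEPT':
            return True
        if action == 'REJECT':
            log_rejected()
            send_refused()
            return False
        if action == 'DROP':
            log_dropped()
            return False
        raise ValueError('unknown ACL action: %s' % action)
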
 
@@ -4792,6 +5110,55 @@ printing this message) will not start.
 </para></listitem>
 </varlistentry>
 
+<varlistentry id="XFROUT_XFR_TRANSFER_CHECK_ERROR">
+<term>XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4</term>
+<listitem><para>
+The pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause is a low-level error in the data source, but it
+may also be a more general (and more unlikely) error such as memory
+shortage.  Some detail of the error is included in the message.  The
+xfrout server tries to return a SERVFAIL response in this case.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_DONE">
+<term>XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete</term>
+<listitem><para>
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_ERROR">
+<term>XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4</term>
+<listitem><para>
+An uncaught exception was encountered while sending the response to
+an AXFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+</para></listitem>
+</varlistentry>
+
+<varlistentry id="XFROUT_XFR_TRANSFER_FAILED">
+<term>XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4</term>
+<listitem><para>
+An outbound transfer of the given zone failed.  An error response is
+sent to the client.  The rcode given is the one set in the error
+response.  This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+</para></listitem>
+</varlistentry>
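
The failure reasons listed above map directly onto the rcode reported in the
message.  Restated as a small sketch (hypothetical helper, not the xfrout
code):

    def transfer_failure_rcode(authoritative, soa_present, quota_ok):
        """Pick the rcode for a failed transfer, per the description above."""
        if not authoritative:
            return 'NOTAUTH'    # we are not authoritative for the zone
        if not soa_present:
            return 'SERVFAIL'   # the SOA record for the zone is missing
        if not quota_ok:
            return 'REFUSED'    # too many simultaneous outgoing transfers
        return 'NOERROR'

    print(transfer_failure_rcode(True, False, True))  # SERVFAIL
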
+
+<varlistentry id="XFROUT_XFR_TRANSFER_STARTED">
+<term>XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started</term>
+<listitem><para>
+A transfer out of the given zone has started.
+</para></listitem>
+</varlistentry>
+
 <varlistentry id="ZONEMGR_CCSESSION_ERROR">
 <term>ZONEMGR_CCSESSION_ERROR command channel session error: %1</term>
 <listitem><para>
diff --git a/src/bin/auth/auth_srv.cc b/src/bin/auth/auth_srv.cc
index c9dac88..caf69b9 100644
--- a/src/bin/auth/auth_srv.cc
+++ b/src/bin/auth/auth_srv.cc
@@ -91,9 +91,9 @@ public:
     bool processNormalQuery(const IOMessage& io_message, MessagePtr message,
                             OutputBufferPtr buffer,
                             auto_ptr<TSIGContext> tsig_context);
-    bool processAxfrQuery(const IOMessage& io_message, MessagePtr message,
-                          OutputBufferPtr buffer,
-                          auto_ptr<TSIGContext> tsig_context);
+    bool processXfrQuery(const IOMessage& io_message, MessagePtr message,
+                         OutputBufferPtr buffer,
+                         auto_ptr<TSIGContext> tsig_context);
     bool processNotify(const IOMessage& io_message, MessagePtr message,
                        OutputBufferPtr buffer,
                        auto_ptr<TSIGContext> tsig_context);
@@ -219,8 +219,9 @@ class ConfigChecker : public SimpleCallback {
 public:
     ConfigChecker(AuthSrv* srv) : server_(srv) {}
     virtual void operator()(const IOMessage&) const {
-        if (server_->getConfigSession()->hasQueuedMsgs()) {
-            server_->getConfigSession()->checkCommand();
+        ModuleCCSession* cfg_session = server_->getConfigSession();
+        if (cfg_session != NULL && cfg_session->hasQueuedMsgs()) {
+            cfg_session->checkCommand();
         }
     }
 private:
@@ -472,10 +473,11 @@ AuthSrv::processMessage(const IOMessage& io_message, MessagePtr message,
         ConstQuestionPtr question = *message->beginQuestion();
         const RRType &qtype = question->getType();
         if (qtype == RRType::AXFR()) {
-            sendAnswer = impl_->processAxfrQuery(io_message, message, buffer,
-                                                 tsig_context);
+            sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+                                                tsig_context);
         } else if (qtype == RRType::IXFR()) {
-            makeErrorMessage(message, buffer, Rcode::NOTIMP(), tsig_context);
+            sendAnswer = impl_->processXfrQuery(io_message, message, buffer,
+                                                tsig_context);
         } else {
             sendAnswer = impl_->processNormalQuery(io_message, message, buffer,
                                                    tsig_context);
@@ -543,9 +545,9 @@ AuthSrvImpl::processNormalQuery(const IOMessage& io_message, MessagePtr message,
 }
 
 bool
-AuthSrvImpl::processAxfrQuery(const IOMessage& io_message, MessagePtr message,
-                              OutputBufferPtr buffer,
-                              auto_ptr<TSIGContext> tsig_context)
+AuthSrvImpl::processXfrQuery(const IOMessage& io_message, MessagePtr message,
+                             OutputBufferPtr buffer,
+                             auto_ptr<TSIGContext> tsig_context)
 {
     // Increment query counter.
     incCounter(io_message.getSocket().getProtocol());
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 53c019f..dd00ea5 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -32,8 +32,8 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
 query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 query_bench_LDADD += $(SQLITE_LIBS)
 
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index b2e0234..f159262 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -117,7 +117,6 @@ void
 Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (nsec->getRdataCount() == 0) {
         isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
-        return;
     }
 
     // Add the NSEC proving NXDOMAIN to the authority section.
@@ -152,7 +151,6 @@ Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
-        return;
     }
 
     // Add the (no-) wildcard proof only when it's different from the NSEC
@@ -178,7 +176,6 @@ Query::addWildcardProof(ZoneFinder& finder) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard proof");
-        return;
     }
     response_.addRRset(Message::SECTION_AUTHORITY,
                        boost::const_pointer_cast<RRset>(fresult.rrset),
@@ -186,6 +183,33 @@ Query::addWildcardProof(ZoneFinder& finder) {
 }
 
 void
+Query::addWildcardNXRRSETProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+    // The NSEC RR found in the zone should prove that there is no
+    // matching <QNAME,QTYPE> via wildcard expansion.
+    if (nsec->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "NSEC for WILDCARD_NXRRSET is empty");
+    }
+    // Add this NSEC RR to the authority section.
+    response_.addRRset(Message::SECTION_AUTHORITY,
+                       boost::const_pointer_cast<RRset>(nsec), dnssec_);
+
+    const ZoneFinder::FindResult fresult =
+        finder.find(qname_, RRType::NSEC(), NULL,
+                    dnssec_opt_ | ZoneFinder::NO_WILDCARD);
+    if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
+        fresult.rrset->getRdataCount() == 0) {
+        isc_throw(BadNSEC, "Unexpected result for no match QNAME proof");
+    }
+
+    if (nsec->getName() != fresult.rrset->getName()) {
+        // Add this NSEC only if it differs from the NSEC added above.
+        response_.addRRset(Message::SECTION_AUTHORITY,
+                           boost::const_pointer_cast<RRset>(fresult.rrset),
+                           dnssec_);
+    }
+}
+
+void
 Query::addAuthAdditional(ZoneFinder& finder) {
     // Fill in authority and addtional sections.
     ZoneFinder::FindResult ns_result = finder.find(finder.getOrigin(),
@@ -355,6 +379,12 @@ Query::process() {
                                        dnssec_);
                 }
                 break;
+            case ZoneFinder::WILDCARD_NXRRSET:
+                addSOA(*result.zone_finder);
+                if (dnssec_ && db_result.rrset) {
+                    addWildcardNXRRSETProof(zfinder, db_result.rrset);
+                }
+                break;
             default:
                 // This is basically a bug of the data source implementation,
                 // but could also happen in the middle of development where
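
The WILDCARD_NXRRSET handling added to query.cc above collects at most two
NSEC RRs: the one proving that the wildcard has no RRset of the queried type,
and the one proving that QNAME itself does not exist.  A Python sketch of that
proof-selection logic (the real code is C++; the finder interface here is a
simplified assumption):

    class BadNSEC(Exception):
        """Raised when an NSEC needed for the proof is missing or broken."""

    def wildcard_nxrrset_proof(finder, qname, wildcard_nsec):
        """Collect NSEC RRs proving WILDCARD_NXRRSET (RFC 4035 Section 3.1.3.4)."""
        proofs = [wildcard_nsec]  # the wildcard exists but lacks the queried type
        # Look up the NSEC for QNAME itself with wildcard matching disabled
        # (the NO_WILDCARD option in the C++ code above).
        result = finder.find(qname, 'NSEC', no_wildcard=True)
        if result.code != 'NXDOMAIN' or result.rrset is None:
            raise BadNSEC('unexpected result for the no-matching-QNAME proof')
        # Add the second NSEC only when it differs from the first; a single
        # NSEC may prove both facts (the "duplicate NSEC" unit test later in
        # this diff).
        if result.rrset.name != wildcard_nsec.name:
            proofs.append(result.rrset)
        return proofs
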
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index 3282c0d..43a8b6b 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -82,6 +82,18 @@ private:
     /// This corresponds to Section 3.1.3.3 of RFC 4035.
     void addWildcardProof(isc::datasrc::ZoneFinder& finder);
 
+    /// \brief Adds one NSEC RR proving that there is no matching QNAME and
+    /// one NSEC RR proving that there is no match for <QNAME,QTYPE> via
+    /// wildcard expansion.
+    ///
+    /// Add NSEC RRs that prove a WILDCARD_NXRRSET result.
+    /// This corresponds to Section 3.1.3.4 of RFC 4035.
+    /// \param finder The ZoneFinder through which the authority data for the
+    /// query is to be found.
+    /// \param nsec The RRset (NSEC RR) which proves that there is no match
+    /// for <QNAME,QTYPE>.
+    void addWildcardNXRRSETProof(isc::datasrc::ZoneFinder& finder,
+                                 isc::dns::ConstRRsetPtr nsec);
+
     /// \brief Look up additional data (i.e., address records for the names
     /// included in NS or MX records) and add them to the additional section.
     ///
diff --git a/src/bin/auth/tests/auth_srv_unittest.cc b/src/bin/auth/tests/auth_srv_unittest.cc
index 4698588..ac25cd6 100644
--- a/src/bin/auth/tests/auth_srv_unittest.cc
+++ b/src/bin/auth/tests/auth_srv_unittest.cc
@@ -229,7 +229,8 @@ TEST_F(AuthSrvTest, AXFROverUDP) {
 TEST_F(AuthSrvTest, AXFRSuccess) {
     EXPECT_FALSE(xfrout.isConnected());
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     // On success, the AXFR query has been passed to a separate process,
     // so we shouldn't have to respond.
@@ -245,7 +246,8 @@ TEST_F(AuthSrvTest, TSIGSigned) {
     const TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Run the message through the server
@@ -278,7 +280,8 @@ TEST_F(AuthSrvTest, TSIGSignedBadKey) {
     TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
@@ -309,7 +312,8 @@ TEST_F(AuthSrvTest, TSIGBadSig) {
     TSIGKey key("key:c2VjcmV0Cg==:hmac-sha1");
     TSIGContext context(key);
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("version.bind"), RRClass::CH(), RRType::TXT());
+                                       Name("version.bind"), RRClass::CH(),
+                                       RRType::TXT());
     createRequestPacket(request_message, IPPROTO_UDP, &context);
 
     // Process the message, but use a different key there
@@ -375,7 +379,8 @@ TEST_F(AuthSrvTest, AXFRConnectFail) {
     EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
     xfrout.disableConnect();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -388,7 +393,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
     // first send a valid query, making the connection with the xfr process
     // open.
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(xfrout.isConnected());
@@ -397,7 +403,8 @@ TEST_F(AuthSrvTest, AXFRSendFail) {
     parse_message->clear(Message::PARSE);
     response_obuffer->clear();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -414,7 +421,66 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
     xfrout.disableSend();
     xfrout.disableDisconnect();
     UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::AXFR());
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::AXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    EXPECT_THROW(server.processMessage(*io_message, parse_message,
+                                       response_obuffer, &dnsserv),
+                 XfroutError);
+    EXPECT_TRUE(xfrout.isConnected());
+    // XXX: we need to re-enable disconnect.  otherwise an exception would be
+    // thrown via the destructor of the server.
+    xfrout.enableDisconnect();
+}
+
+TEST_F(AuthSrvTest, IXFRConnectFail) {
+    EXPECT_FALSE(xfrout.isConnected()); // check prerequisite
+    xfrout.disableConnect();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+                opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+    EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRSendFail) {
+    // first send a valid query, making the connection with the xfr process
+    // open.
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(xfrout.isConnected());
+
+    xfrout.disableSend();
+    parse_message->clear(Message::PARSE);
+    response_obuffer->clear();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_TRUE(dnsserv.hasAnswer());
+    headerCheck(*parse_message, default_qid, Rcode::SERVFAIL(),
+                opcode.getCode(), QR_FLAG, 1, 0, 0, 0);
+
+    // The connection should have been closed due to the send failure.
+    EXPECT_FALSE(xfrout.isConnected());
+}
+
+TEST_F(AuthSrvTest, IXFRDisconnectFail) {
+    // In our usage disconnect() shouldn't fail.  So we'll see the exception
+    // should it be thrown.
+    xfrout.disableSend();
+    xfrout.disableDisconnect();
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                                       Name("example.com"), RRClass::IN(),
+                                       RRType::IXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     EXPECT_THROW(server.processMessage(*io_message, parse_message,
                                        response_obuffer, &dnsserv),
@@ -426,8 +492,9 @@ TEST_F(AuthSrvTest, AXFRDisconnectFail) {
 }
 
 TEST_F(AuthSrvTest, notify) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -458,8 +525,9 @@ TEST_F(AuthSrvTest, notify) {
 
 TEST_F(AuthSrvTest, notifyForCHClass) {
     // Same as the previous test, but for the CH RRClass.
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::CH(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::CH(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -487,8 +555,9 @@ TEST_F(AuthSrvTest, notifyEmptyQuestion) {
 }
 
 TEST_F(AuthSrvTest, notifyMultiQuestions) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     // add one more SOA question
     request_message.addQuestion(Question(Name("example.com"), RRClass::IN(),
                                          RRType::SOA()));
@@ -501,8 +570,9 @@ TEST_F(AuthSrvTest, notifyMultiQuestions) {
 }
 
 TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::NS());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::NS());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -513,8 +583,9 @@ TEST_F(AuthSrvTest, notifyNonSOAQuestion) {
 
 TEST_F(AuthSrvTest, notifyWithoutAA) {
     // implicitly leave the AA bit off.  our implementation will accept it.
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
     EXPECT_TRUE(dnsserv.hasAnswer());
@@ -523,8 +594,9 @@ TEST_F(AuthSrvTest, notifyWithoutAA) {
 }
 
 TEST_F(AuthSrvTest, notifyWithErrorRcode) {
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     request_message.setRcode(Rcode::SERVFAIL());
     createRequestPacket(request_message, IPPROTO_UDP);
@@ -537,8 +609,9 @@ TEST_F(AuthSrvTest, notifyWithErrorRcode) {
 TEST_F(AuthSrvTest, notifyWithoutSession) {
     server.setXfrinSession(NULL);
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
 
@@ -551,8 +624,9 @@ TEST_F(AuthSrvTest, notifyWithoutSession) {
 TEST_F(AuthSrvTest, notifySendFail) {
     notify_session.disableSend();
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
 
@@ -563,8 +637,9 @@ TEST_F(AuthSrvTest, notifySendFail) {
 TEST_F(AuthSrvTest, notifyReceiveFail) {
     notify_session.disableReceive();
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -574,8 +649,9 @@ TEST_F(AuthSrvTest, notifyReceiveFail) {
 TEST_F(AuthSrvTest, notifyWithBogusSessionMessage) {
     notify_session.setMessage(Element::fromJSON("{\"foo\": 1}"));
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -586,8 +662,9 @@ TEST_F(AuthSrvTest, notifyWithSessionMessageError) {
     notify_session.setMessage(
         Element::fromJSON("{\"result\": [1, \"FAIL\"]}"));
 
-    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(), default_qid,
-                         Name("example.com"), RRClass::IN(), RRType::SOA());
+    UnitTestUtil::createRequestMessage(request_message, Opcode::NOTIFY(),
+                                       default_qid, Name("example.com"),
+                                       RRClass::IN(), RRType::SOA());
     request_message.setHeaderFlag(Message::HEADERFLAG_AA);
     createRequestPacket(request_message, IPPROTO_UDP);
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
@@ -737,12 +814,28 @@ TEST_F(AuthSrvTest, queryCounterTCPAXFR) {
                          Name("example.com"), RRClass::IN(), RRType::AXFR());
     createRequestPacket(request_message, IPPROTO_TCP);
     // On success, the AXFR query has been passed to a separate process,
-    // so we shouldn't have to respond.
+    // so auth itself shouldn't respond.
     server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_FALSE(dnsserv.hasAnswer());
     // After processing TCP AXFR query, the counter should be 1.
     EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
 }
 
+// Submit TCP IXFR query and check query counter
+TEST_F(AuthSrvTest, queryCounterTCPIXFR) {
+    // The counter should be initialized to 0.
+    EXPECT_EQ(0, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+    UnitTestUtil::createRequestMessage(request_message, opcode, default_qid,
+                         Name("example.com"), RRClass::IN(), RRType::IXFR());
+    createRequestPacket(request_message, IPPROTO_TCP);
+    // On success, the IXFR query has been passed to a separate process,
+    // so auth itself shouldn't respond.
+    server.processMessage(*io_message, parse_message, response_obuffer, &dnsserv);
+    EXPECT_FALSE(dnsserv.hasAnswer());
+    // After processing TCP IXFR query, the counter should be 1.
+    EXPECT_EQ(1, server.getCounter(AuthCounters::COUNTER_TCP_QUERY));
+}
+
 // class for queryCounterUnexpected test
 // getProtocol() returns IPPROTO_IP
 class DummyUnknownSocket : public IOSocket {
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 16a2409..14067ab 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -100,6 +100,22 @@ const char* const cnamewild_txt =
     "*.cnamewild.example.com. 3600 IN CNAME www.example.org.\n";
 const char* const nsec_cnamewild_txt = "*.cnamewild.example.com. "
     "3600 IN NSEC delegation.example.com. CNAME NSEC RRSIG\n";
+// Wildcard_nxrrset
+const char* const wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN A 192.0.2.9\n";
+const char* const nsec_wild_txt_nxrrset =
+    "*.uwild.example.com. 3600 IN NSEC www.uwild.example.com. A NSEC RRSIG\n";
+const char* const wild_txt_next =
+    "www.uwild.example.com. 3600 IN A 192.0.2.11\n";
+const char* const nsec_wild_txt_next =
+    "www.uwild.example.com. 3600 IN NSEC *.wild.example.com. A NSEC RRSIG\n";
+// Wildcard empty
+const char* const empty_txt = "b.*.t.example.com. 3600 IN A 192.0.2.13\n";
+const char* const nsec_empty_txt =
+    "b.*.t.example.com. 3600 IN NSEC *.uwild.example.com. A NSEC RRSIG\n";
+const char* const empty_prev_txt = "t.example.com. 3600 IN A 192.0.2.15\n";
+const char* const nsec_empty_prev_txt =
+    "t.example.com. 3600 IN NSEC b.*.t.example.com. A NSEC RRSIG\n";
 // Used in NXDOMAIN proof test.  We are going to test some unusual case where
 // the best possible wildcard is below the "next domain" of the NSEC RR that
 // proves the NXDOMAIN, i.e.,
@@ -116,7 +132,6 @@ const char* const nsec_mx_txt =
     "mx.example.com. 3600 IN NSEC ).no.example.com. MX NSEC RRSIG\n";
 const char* const nsec_no_txt =
     ").no.example.com. 3600 IN NSEC nz.no.example.com. AAAA NSEC RRSIG\n";
-
 // We'll also test the case where a single NSEC proves both NXDOMAIN and the
 // non existence of wildcard.  The following records will be used for that
 // test.
@@ -179,7 +194,10 @@ public:
             other_zone_rrs << no_txt << nz_txt <<
             nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
             nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
-            wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt;
+            wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt <<
+            wild_txt_nxrrset << nsec_wild_txt_nxrrset << wild_txt_next <<
+            nsec_wild_txt_next << empty_txt << nsec_empty_txt <<
+            empty_prev_txt << nsec_empty_prev_txt;
 
         masterLoad(zone_stream, origin_, rrclass_,
                    boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -396,15 +414,47 @@ MockZoneFinder::find(const Name& name, const RRType& type,
     // hardcoded specific cases, ignoring other details such as canceling
     // due to the existence of closer name.
     if ((options & NO_WILDCARD) == 0) {
-        const Name wild_suffix("wild.example.com");
-        if (name.compare(wild_suffix).getRelation() ==
-            NameComparisonResult::SUBDOMAIN) {
-            domain = domains_.find(Name("*").concatenate(wild_suffix));
-            assert(domain != domains_.end());
-            RRsetStore::const_iterator found_rrset = domain->second.find(type);
-            assert(found_rrset != domain->second.end());
-            return (FindResult(WILDCARD,
-                               substituteWild(*found_rrset->second, name)));
+        const Name wild_suffix(name.split(1));
+        // These names are used by the unit tests for the wildcard cases.
+        if (name.equals(Name("www.wild.example.com")) ||
+            name.equals(Name("www1.uwild.example.com")) ||
+            name.equals(Name("a.t.example.com"))) {
+            if (name.compare(wild_suffix).getRelation() ==
+                NameComparisonResult::SUBDOMAIN) {
+                domain = domains_.find(Name("*").concatenate(wild_suffix));
+                // Matched the QNAME
+                if (domain != domains_.end()) {
+                    RRsetStore::const_iterator found_rrset =
+                        domain->second.find(type);
+                    // Matched the QTYPE
+                    if (found_rrset != domain->second.end()) {
+                        return (FindResult(WILDCARD,
+                            substituteWild(*found_rrset->second, name)));
+                    } else {
+                        // No matching QTYPE: this is the WILDCARD_NXRRSET case
+                        found_rrset = domain->second.find(RRType::NSEC());
+                        assert(found_rrset != domain->second.end());
+                        Name newName = Name("*").concatenate(wild_suffix);
+                        return (FindResult(WILDCARD_NXRRSET,
+                            substituteWild(*found_rrset->second, newName)));
+                    }
+                } else {
+                    // This is the empty non-terminal case under a wildcard.
+                    Name emptyName = Name("*").concatenate(wild_suffix);
+                    for (Domains::reverse_iterator it = domains_.rbegin();
+                         it != domains_.rend();
+                         ++it) {
+                        RRsetStore::const_iterator nsec_it;
+                        if ((*it).first < emptyName &&
+                            (nsec_it = (*it).second.find(RRType::NSEC()))
+                            != (*it).second.end()) {
+                            return (FindResult(WILDCARD_NXRRSET,
+                                               (*nsec_it).second));
+                        }
+                    }
+                }
+                return (FindResult(WILDCARD_NXRRSET, RRsetPtr()));
+            }
         }
         const Name cnamewild_suffix("cnamewild.example.com");
         if (name.compare(cnamewild_suffix).getRelation() ==
@@ -924,6 +974,60 @@ TEST_F(QueryTest, badWildcardProof3) {
                  Query::BadNSEC);
 }
 
+TEST_F(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have the SOA, the NSEC
+    // that proves the NXRRSET, and their RRSIGs.  In this case we only need
+    // one NSEC, which proves both that the QNAME does not exist and that the
+    // wildcard has no RRset of the requested type.
+    Query(memory_client, Name("www.wild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt) +
+                   string("*.wild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
+    // WILDCARD_NXRRSET with DNSSEC proof.  We should have the SOA, the NSECs
+    // that prove the NXRRSET, and their RRSIGs.  In this case we need two
+    // NSEC RRs: one proves that the QNAME does not exist and the other proves
+    // that the wildcard has no RRset of the requested type.
+    Query(memory_client, Name("www1.uwild.example.com"), RRType::TXT(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_wild_txt_nxrrset) +
+                   string("*.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_wild_txt_next) +
+                   string("www.uwild.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC") + "\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
+TEST_F(QueryTest, wildcardEmptyWithNSEC) {
+    // WILDCARD_EMPTY with DNSSEC proof.  We should have the SOA, the NSECs
+    // that prove the NXDOMAIN, and their RRSIGs.  In this case we need two
+    // NSEC RRs: one proves that the QNAME does not exist and the other proves
+    // the non-existence of a matching wildcard.
+    Query(memory_client, Name("a.t.example.com"), RRType::A(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_empty_prev_txt) +
+                   string("t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_empty_txt) +
+                   string("b.*.t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
 /*
  * This tests that when there's no SOA and we need a negative answer. It should
  * throw in that case.
diff --git a/src/bin/bind10/bind10.8 b/src/bin/bind10/bind10.8
index 0adcb70..c2e44e7 100644
--- a/src/bin/bind10/bind10.8
+++ b/src/bin/bind10/bind10.8
@@ -2,21 +2,12 @@
 .\"     Title: bind10
 .\"    Author: [see the "AUTHORS" section]
 .\" Generator: DocBook XSL Stylesheets v1.75.2 <http://docbook.sf.net/>
-.\"      Date: August 11, 2011
+.\"      Date: November 23, 2011
 .\"    Manual: BIND10
 .\"    Source: BIND10
 .\"  Language: English
 .\"
-.TH "BIND10" "8" "August 11, 2011" "BIND10" "BIND10"
-.\" -----------------------------------------------------------------
-.\" * Define some portability stuff
-.\" -----------------------------------------------------------------
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.\" http://bugs.debian.org/507673
-.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
-.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-.ie \n(.g .ds Aq \(aq
-.el       .ds Aq '
+.TH "BIND10" "8" "November 23, 2011" "BIND10" "BIND10"
 .\" -----------------------------------------------------------------
 .\" * set default formatting
 .\" -----------------------------------------------------------------
@@ -31,7 +22,7 @@
 bind10 \- BIND 10 boss process
 .SH "SYNOPSIS"
 .HP \w'\fBbind10\fR\ 'u
-\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-brittle\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
+\fBbind10\fR [\fB\-c\ \fR\fB\fIconfig\-filename\fR\fR] [\fB\-m\ \fR\fB\fIfile\fR\fR] [\fB\-n\fR] [\fB\-p\ \fR\fB\fIdata_path\fR\fR] [\fB\-u\ \fR\fB\fIuser\fR\fR] [\fB\-v\fR] [\fB\-w\ \fR\fB\fIwait_time\fR\fR] [\fB\-\-cmdctl\-port\fR\ \fIport\fR] [\fB\-\-config\-file\fR\ \fIconfig\-filename\fR] [\fB\-\-data\-path\fR\ \fIdirectory\fR] [\fB\-\-msgq\-socket\-file\ \fR\fB\fIfile\fR\fR] [\fB\-\-no\-cache\fR] [\fB\-\-pid\-file\fR\ \fIfilename\fR] [\fB\-\-pretty\-name\ \fR\fB\fIname\fR\fR] [\fB\-\-user\ \fR\fB\fIuser\fR\fR] [\fB\-\-verbose\fR] [\fB\-\-wait\ \fR\fB\fIwait_time\fR\fR]
 .SH "DESCRIPTION"
 .PP
 The
@@ -41,13 +32,6 @@ daemon starts up other BIND 10 required daemons\&. It handles restarting of exit
 .PP
 The arguments are as follows:
 .PP
-\fB\-\-brittle\fR
-.RS 4
-Shutdown if any of the child processes of
-\fBbind10\fR
-exit\&. This is intended to help developers debug the server, and should not be used in production\&.
-.RE
-.PP
 \fB\-c\fR \fIconfig\-filename\fR, \fB\-\-config\-file\fR \fIconfig\-filename\fR
 .RS 4
 The configuration filename to use\&. Can be either absolute or relative to data path\&. In case it is absolute, value of data path is not considered\&.
@@ -121,6 +105,204 @@ and its child processes\&.
 .RS 4
 Sets the amount of time that BIND 10 will wait for the configuration manager (a key component of BIND 10) to initialize itself before abandoning the start up and terminating with an error\&. The wait_time is specified in seconds and has a default value of 10\&.
 .RE
+.SH "CONFIGURATION AND COMMANDS"
+.PP
+The configuration provides settings for components for
+\fBbind10\fR
+to manage under
+\fI/Boss/components/\fR\&. The default elements are:
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-auth\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-cmdctl\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/setuid\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-stats\-httpd\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrin\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-xfrout\fR
+.RE
+.sp
+.RS 4
+.ie n \{\
+\h'-04'\(bu\h'+03'\c
+.\}
+.el \{\
+.sp -1
+.IP \(bu 2.3
+.\}
+
+\fI/Boss/components/b10\-zonemgr\fR
+.RE
+.PP
+(Note that the startup of
+\fBb10\-sockcreator\fR,
+\fBb10\-cfgmgr\fR, and
+\fBb10\-msgq\fR
+is not configurable\&. It is hardcoded and
+\fBbind10\fR
+will not run without them\&.)
+.PP
+These named sets (listed above) contain the following settings:
+.PP
+\fIaddress\fR
+.RS 4
+The name used to communicate with the component on the message bus\&.
+.RE
+.PP
+\fIkind\fR
+.RS 4
+This defines how required a component is\&. The possible settings for
+\fIkind\fR
+are:
+\fIcore\fR
+(the system won\'t start if this component fails to start, and
+\fBbind10\fR
+will shut down if a
+\(lqcore\(rq
+component crashes),
+\fIdispensable\fR
+(\fBbind10\fR
+will restart a failing component), and
+\fIneeded\fR
+(\fBbind10\fR
+will shut down if the component fails to start initially, but if it crashes later, \fBbind10\fR will attempt to restart it)\&. This setting is required\&.
+.RE
+.PP
+\fIpriority\fR
+.RS 4
+This is an integer\&.
+\fBbind10\fR
+will start the components with largest priority numbers first\&.
+.RE
+.PP
+\fIprocess\fR
+.RS 4
+This is the filename of the executable to be started\&. If not defined, then
+\fBbind10\fR
+will use the component name instead\&.
+.RE
+.PP
+\fIspecial\fR
+.RS 4
+This defines whether the component is started in a special way\&.
+.RE
+.PP
+The
+\fIBoss\fR
+configuration commands are:
+.PP
+
+\fBgetstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon\&. This is an internal command and not exposed to the administrator\&.
+
+.PP
+
+\fBping\fR
+is used to check the connection with the
+\fBbind10\fR
+daemon\&. It returns the text
+\(lqpong\(rq\&.
+.PP
+
+\fBsendstats\fR
+tells
+\fBbind10\fR
+to send its statistics data to the
+\fBb10\-stats\fR
+daemon immediately\&.
+.PP
+
+\fBshow_processes\fR
+lists the current processes managed by
+\fBbind10\fR\&. The output is an array in JSON format containing the process ID and the name for each\&.
+
+
+.PP
+
+\fBshutdown\fR
+tells
+\fBbind10\fR
+to shutdown the BIND 10 servers\&. It will tell each process it manages to shutdown and, when complete,
+\fBbind10\fR
+will exit\&.
 .SH "STATISTICS DATA"
 .PP
 The statistics data collected by the
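
To make the new CONFIGURATION AND COMMANDS text concrete: /Boss/components is
a named set keyed by component name.  As a Python data structure, roughly as
the boss process would receive it from the configuration manager, it could
look like the following (the individual values are invented for illustration):

    components_config = {
        "b10-auth":   {"kind": "needed", "special": "auth", "priority": 10},
        "b10-xfrout": {"kind": "dispensable", "address": "Xfrout"},
        "b10-cmdctl": {"kind": "needed", "special": "cmdctl"},
    }
    # "kind" is required; "process" defaults to the component name when it is
    # not given, and "priority" controls start order (largest first).
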
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index 340a2bb..6705760 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -20,7 +20,7 @@
 <refentry>
 
   <refentryinfo>
-    <date>August 11, 2011</date>
+    <date>November 23, 2011</date>
   </refentryinfo>
 
   <refmeta>
@@ -218,6 +218,204 @@ TODO: configuration section
 -->
 
   <refsect1>
+    <title>CONFIGURATION AND COMMANDS</title>
+
+    <para>
+      The configuration provides settings for components for
+      <command>bind10</command> to manage under
+      <varname>/Boss/components/</varname>.
+      The default elements are:
+    </para>
+
+    <itemizedlist>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-auth</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-cmdctl</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/setuid</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-stats-httpd</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrin</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-xfrout</varname> </para>
+      </listitem>
+
+      <listitem>
+        <para> <varname>/Boss/components/b10-zonemgr</varname> </para>
+      </listitem>
+
+    </itemizedlist>
+
+    <para>
+      (Note that the startup of <command>b10-sockcreator</command>,
+      <command>b10-cfgmgr</command>, and <command>b10-msgq</command>
+      is not configurable. It is hardcoded and <command>bind10</command>
+      will not run without them.)
+    </para>
+
+    <para>
+      These named sets (listed above) contain the following settings:
+    </para>
+
+    <variablelist>
+
+      <varlistentry>
+        <term><varname>address</varname></term>
+        <listitem>
+          <para>The name used to communicate with the component on the
+          message bus.</para>
+<!-- NOTE: vorner said:
+These can be null, because the components are special ones, and
+the special class there already knows the address. It is (I hope)
+explained in the guide. I'd like to get rid of the special components
+sometime and I'd like it to teach to guess the address.
+-->
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+        <term><varname>kind</varname></term>
+        <listitem>
+          <para>
+            This defines how required a component is.
+            The possible settings for <varname>kind</varname> are:
+            <varname>core</varname> (the system won't start if this
+            component fails to start, and <command>bind10</command>
+            will shut down if a <quote>core</quote> component crashes),
+            <varname>dispensable</varname> (<command>bind10</command>
+            will restart a failing component),
+            and
+            <varname>needed</varname> (<command>bind10</command>
+            will shut down if the component fails to start initially,
+            but if it crashes later, <command>bind10</command> will
+            attempt to restart it).
+            This setting is required.
+<!-- TODO: formatting -->
+          </para>
+        </listitem>
+      </varlistentry>
+
+<!--
+TODO: currently not used
+      <varlistentry>
+        <term> <varname>params</varname> </term>
+        <listitem>
+          <para>
+list
+</para>
+        </listitem>
+      </varlistentry>
+-->
+
+      <varlistentry>
+        <term> <varname>priority</varname> </term>
+        <listitem>
+          <para>This is an integer. <command>bind10</command>
+            will start the components with largest priority numbers first.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>process</varname> </term>
+        <listitem>
+          <para>This is the filename of the executable to be started.
+            If not defined, then <command>bind10</command> will
+            use the component name instead.
+          </para>
+        </listitem>
+      </varlistentry>
+
+      <varlistentry>
+          <term> <varname>special</varname> </term>
+        <listitem>
+          <para>
+            This defines whether the component is started in a special
+            way.
+<!--
+TODO: document this ... but maybe some of these will be removed
+once we get rid of some using switches for components?
+
+auth
+cfgmgr
+cmdctl
+msgq
+resolver
+setuid
+sockcreator
+xfrin
+-->
+
+</para>
+        </listitem>
+      </varlistentry>
+
+    </variablelist>
+
+<!-- TODO: formating -->
+    <para>
+      The <varname>Boss</varname> configuration commands are:
+    </para>
+<!-- TODO: let's just let bind10 be known as bind10 and not Boss -->
+
+    <para>
+      <command>getstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon.
+      This is an internal command and not exposed to the administrator.
+<!-- not defined in spec -->
+<!-- TODO: explain difference with sendstat -->
+    </para>
+
+    <para>
+      <command>ping</command> is used to check the connection with the
+      <command>bind10</command> daemon.
+      It returns the text <quote>pong</quote>.
+    </para>
+
+    <para>
+      <command>sendstats</command> tells <command>bind10</command>
+      to send its statistics data to the <command>b10-stats</command>
+      daemon immediately.
+<!-- TODO: compare with internal command getstats? -->
+    </para>
+
+    <para>
+      <command>show_processes</command> lists the current processes
+      managed by <command>bind10</command>.
+      The output is an array in JSON format containing the process
+      ID and the name for each.
+<!-- TODO: what is name? -->
+<!-- TODO: change to JSON object format? -->
+<!-- TODO: ticket #1406 -->
+    </para>
+
+    <para>
+      <command>shutdown</command> tells <command>bind10</command>
+      to shutdown the BIND 10 servers.
+      It will tell each process it manages to shutdown and, when
+      complete, <command>bind10</command> will exit.
+    </para>
+
+  </refsect1>
+
+  <refsect1>
     <title>STATISTICS DATA</title>
 
     <para>
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index d850e47..79635fd 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -99,6 +99,12 @@ The boss module is sending a kill signal to process with the given name,
 as part of the process of killing all started processes during a failed
 startup, as described for BIND10_KILLING_ALL_PROCESSES
 
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and the bind10 process can release them as well, unless
+the same sockets are also used by another application.
+
 % BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
 There already appears to be a message bus daemon running. Either an
 old process was not shut down correctly, and needs to be killed, or
@@ -110,6 +116,11 @@ While listening on the message bus channel for messages, it suddenly
 disappeared. The msgq daemon may have died. This might lead to an
 inconsistent state of the system, and BIND 10 will now shut down.
 
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor.  The error is included in the message; the most common reason is
+that the request is invalid and may not have come from the bind10 process at all.
+
 % BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
 This indicates a process started previously terminated. The process id
 and component owning the process are indicated, as well as the exit code.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 68ff97b..00858d8 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -72,6 +72,9 @@ import isc.log
 from isc.log_messages.bind10_messages import *
 import isc.bind10.component
 import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
 
 isc.log.init("b10-boss")
 logger = isc.log.Logger("boss")
@@ -81,6 +84,10 @@ logger = isc.log.Logger("boss")
 DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
 DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
 
+# Messages sent over the unix domain socket to indicate if it is followed by a real socket
+CREATOR_SOCKET_OK = "1\n"
+CREATOR_SOCKET_UNAVAILABLE = "0\n"
+
 # Assign this process some longer name
 isc.util.process.rename(sys.argv[0])
 
@@ -92,51 +99,6 @@ VERSION = "bind10 20110223 (BIND 10 @PACKAGE_VERSION@)"
 # This is for boot_time of Boss
 _BASETIME = time.gmtime()
 
-class RestartSchedule:
-    """
-Keeps state when restarting something (in this case, a process).
-
-When a process dies unexpectedly, we need to restart it. However, if 
-it fails to restart for some reason, then we should not simply keep
-restarting it at high speed.
-
-A more sophisticated algorithm can be developed, but for now we choose
-a simple set of rules:
-
-  * If a process was been running for >=10 seconds, we restart it
-    right away.
-  * If a process was running for <10 seconds, we wait until 10 seconds
-    after it was started.
-
-To avoid programs getting into lockstep, we use a normal distribution
-to avoid being restarted at exactly 10 seconds."""
-
-    def __init__(self, restart_frequency=10.0):
-        self.restart_frequency = restart_frequency
-        self.run_start_time = None
-        self.run_stop_time = None
-        self.restart_time = None
-    
-    def set_run_start_time(self, when=None):
-        if when is None:
-            when = time.time()
-        self.run_start_time = when
-        sigma = self.restart_frequency * 0.05
-        self.restart_time = when + random.normalvariate(self.restart_frequency, 
-                                                        sigma)
-
-    def set_run_stop_time(self, when=None):
-        """We don't actually do anything with stop time now, but it 
-        might be useful for future algorithms."""
-        if when is None:
-            when = time.time()
-        self.run_stop_time = when
-
-    def get_restart_time(self, when=None):
-        if when is None:
-            when = time.time()
-        return max(when, self.restart_time)
-
 class ProcessInfoError(Exception): pass
 
 class ProcessInfo:
@@ -151,7 +113,6 @@ class ProcessInfo:
         self.env = env
         self.dev_null_stdout = dev_null_stdout
         self.dev_null_stderr = dev_null_stderr
-        self.restart_schedule = RestartSchedule()
         self.uid = uid
         self.username = username
         self.process = None
@@ -200,7 +161,6 @@ class ProcessInfo:
                                         env=spawn_env,
                                         preexec_fn=self._preexec_work)
         self.pid = self.process.pid
-        self.restart_schedule.set_run_start_time()
 
     # spawn() and respawn() are the same for now, but in the future they
     # may have different functionality
@@ -239,13 +199,7 @@ class BoB:
         """
         self.cc_session = None
         self.ccs = None
-        self.cfg_start_auth = True
-        self.cfg_start_resolver = False
-        self.cfg_start_dhcp6 = False
-        self.cfg_start_dhcp4 = False
         self.curproc = None
-        # XXX: Not used now, waits for reintroduction of restarts.
-        self.dead_processes = {}
         self.msgq_socket_file = msgq_socket_file
         self.nocache = nocache
         self.component_config = {}
@@ -254,6 +208,9 @@ class BoB:
         # inapropriate. But as the code isn't probably completely ready
         # for it, we leave it at components for now.
         self.components = {}
+        # Simple list of components that died and need to wait for a
+        # restart.  Components manage their own restart schedules now.
+        self.components_to_restart = []
         self.runnable = False
         self.uid = setuid
         self.username = username
@@ -291,6 +248,12 @@ class BoB:
         # If -v was set, enable full debug logging.
         if self.verbose:
             logger.set_severity("DEBUG", 99)
+        # This is set in init_socket_srv
+        self._socket_path = None
+        self._socket_cache = None
+        self._tmpdir = None
+        self._srv_socket = None
+        self._unix_sockets = {}
 
     def __propagate_component_config(self, config):
         comps = dict(config)
@@ -365,6 +328,18 @@ class BoB:
             elif command == "show_processes":
                 answer = isc.config.ccsession. \
                     create_answer(0, self.get_processes())
+            elif command == "get_socket":
+                answer = self._get_socket(args)
+            elif command == "drop_socket":
+                if "token" not in args:
+                    answer = isc.config.ccsession. \
+                        create_answer(1, "Missing token parameter")
+                else:
+                    try:
+                        self._socket_cache.drop_socket(args["token"])
+                        answer = isc.config.ccsession.create_answer(0)
+                    except Exception as e:
+                        answer = isc.config.ccsession.create_answer(1, str(e))
             else:
                 answer = isc.config.ccsession.create_answer(1,
                                                             "Unknown command")
@@ -624,33 +599,6 @@ class BoB:
         # ... and start
         return self.start_process("b10-resolver", resargs, self.c_channel_env)
 
-    def __ld_path_hack(self):
-        # XXX: a quick-hack workaround.  xfrin/out will implicitly use
-        # dynamically loadable data source modules, which will be installed in
-        # $(libdir).
-        # On some OSes (including MacOS X and *BSDs) the main process (python)
-        # cannot find the modules unless they are located in a common shared
-        # object path or a path in the (DY)LD_LIBRARY_PATH.  We should seek
-        # a cleaner solution, but for a short term workaround we specify the
-        # path here, unconditionally, and without even bothering which
-        # environment variable should be used.
-        #
-        # We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
-        # do this, as the conditions that make this workaround needed are
-        # the same as for the libexec path addition
-        # TODO: Once #1292 is finished, remove this method and the special
-        # component, use it as normal component.
-        env = dict(self.c_channel_env)
-        if ADD_LIBEXEC_PATH:
-            cur_path = os.getenv('DYLD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-
-            cur_path = os.getenv('LD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-        return env
-
     def start_cmdctl(self):
         """
             Starts the command control process
@@ -663,22 +611,6 @@ class BoB:
         return self.start_process("b10-cmdctl", args, self.c_channel_env,
                                   self.cmdctl_port)
 
-    def start_xfrin(self):
-        # Set up the command arguments.
-        args = ['b10-xfrin']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrin", args, self.__ld_path_hack())
-
-    def start_xfrout(self):
-        # Set up the command arguments.
-        args = ['b10-xfrout']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrout", args, self.__ld_path_hack())
-
     def start_all_components(self):
         """
             Starts up all the components.  Any exception generated during the
@@ -750,7 +682,7 @@ class BoB:
 
         If we did not start yet, it raises an exception, which is meant
         to propagate through the component and configurator to the startup
-        routine and abort the startup imediatelly. If it is started up already,
+        routine and abort the startup immediately. If it is started up already,
         we just mark it so we terminate soon.
 
         It does set the exit code in both cases.
@@ -825,7 +757,11 @@ class BoB:
                     # Tell it it failed. But only if it matters (we are
                     # not shutting down and the component considers itself
                    # to be running).
-                    component.failed(exit_status);
+                    component_restarted = component.failed(exit_status)
+                    # If the component wants to be restarted, but not just
+                    # yet, failed() returns False.
+                    if not component_restarted:
+                        self.components_to_restart.append(component)
             else:
                 logger.info(BIND10_UNKNOWN_CHILD_PROCESS_ENDED, pid)
 
@@ -841,39 +777,225 @@ class BoB:
             timeout value.
 
         """
-        # TODO: This is an artefact of previous way of handling processes. The
-        # restart queue is currently empty at all times, so this returns None
-        # every time it is called (thought is a relict that is obviously wrong,
-        # it is called and it doesn't hurt).
-        #
-        # It is preserved for archeological reasons for the time when we return
-        # the delayed restarts, most of it might be useful then (or, if it is
-        # found useless, removed).
-        next_restart = None
-        # if we're shutting down, then don't restart
         if not self.runnable:
             return 0
-        # otherwise look through each dead process and try to restart
-        still_dead = {}
+        still_dead = []
+        # keep track of the first time we need to check this queue again,
+        # if at all
+        next_restart_time = None
         now = time.time()
-        for proc_info in self.dead_processes.values():
-            restart_time = proc_info.restart_schedule.get_restart_time(now)
-            if restart_time > now:
-                if (next_restart is None) or (next_restart > restart_time):
-                    next_restart = restart_time
-                still_dead[proc_info.pid] = proc_info
+        for component in self.components_to_restart:
+            if not component.restart(now):
+                still_dead.append(component)
+                if next_restart_time is None or\
+                   next_restart_time > component.get_restart_time():
+                    next_restart_time = component.get_restart_time()
+        self.components_to_restart = still_dead
+
+        return next_restart_time
+
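# For illustration only: a minimal, hypothetical stand-in showing the contract
# restart_processes() above relies on; it is not the real
# isc.bind10.component.Component class. Only the methods used by the loop are
# sketched, and the 10-second delay is an assumption made for the example.
import time

class DelayedRestartComponent:
    def __init__(self, delay=10):
        # Earliest point in time at which a restart may be attempted.
        self._restart_time = time.time() + delay

    def get_restart_time(self):
        """Return the scheduled restart time (seconds since the epoch)."""
        return self._restart_time

    def restart(self, now):
        """Try to restart; return True on success, False if it is too early."""
        if now < self._restart_time:
            return False
        # ... spawn the process here ...
        return True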
+    def _get_socket(self, args):
+        """
+        Implementation of the get_socket CC command. It asks the cache
+        to provide the token and sends the information back.
+        """
+        try:
+            try:
+                addr = isc.net.parse.addr_parse(args['address'])
+                port = isc.net.parse.port_parse(args['port'])
+                protocol = args['protocol']
+                if protocol not in ['UDP', 'TCP']:
+                    raise ValueError("Protocol must be either UDP or TCP")
+                share_mode = args['share_mode']
+                if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+                    raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+                                     " or NO")
+                share_name = args['share_name']
+            except KeyError as ke:
+                return \
+                    isc.config.ccsession.create_answer(1,
+                                                       "Missing parameter " +
+                                                       str(ke))
+
+            # FIXME: This call contains blocking IPC. It is expected to be
+            # short, but if it turns out to be problem, we'll need to do
+            # something about it.
+            token = self._socket_cache.get_token(protocol, addr, port,
+                                                 share_mode, share_name)
+            return isc.config.ccsession.create_answer(0, {
+                'token': token,
+                'path': self._socket_path
+            })
+        except Exception as e:
+            return isc.config.ccsession.create_answer(1, str(e))
+
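# For illustration only: the shape of a get_socket exchange handled by
# _get_socket() above. The argument values are examples; the token is an
# opaque string produced by the socket cache.
get_socket_args = {
    "address": "::",
    "port": 53,
    "protocol": "UDP",        # or "TCP"
    "share_mode": "ANY",      # or "SAMEAPP" / "NO"
    "share_name": "app",
}
# A successful answer carries the token and the unix-domain socket path:
#   {"result": [0, {"token": "<opaque token>", "path": "/path/to/sockcreator"}]}
# Any failure is reported as {"result": [1, "<error message>"]}.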
+    def socket_request_handler(self, token, unix_socket):
+        """
+        This function handles a token that comes over a unix_domain socket.
+        The function looks into the _socket_cache and sends the socket
+        identified by the token back over the unix_socket.
+        """
+        try:
+            fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+            # FIXME: These two calls are blocking in their nature. An OS-level
+            # buffer is likely to be large enough to hold all these data, but
+            # if it wasn't and the remote application got stuck, we would have
+            # a problem. If there appear such problems, we should do something
+            # about it.
+            unix_socket.sendall(CREATOR_SOCKET_OK)
+            libutil_io_python.send_fd(unix_socket.fileno(), fd)
+        except Exception as e:
+            logger.info(BIND10_NO_SOCKET, token, e)
+            unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+    def socket_consumer_dead(self, unix_socket):
+        """
+        This function handles when a unix_socket closes. This means all
+        sockets sent to it are to be considered closed. This function signals
+        so to the _socket_cache.
+        """
+        logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+        try:
+            self._socket_cache.drop_application(unix_socket.fileno())
+        except ValueError:
+            # This means the application holds no sockets. It's harmless, as it
+            # can happen in real life - for example, it requests a socket, but
+            # get_socket doesn't find it, so the application dies. It should be
+            # rare, though.
+            pass
+
+    def set_creator(self, creator):
+        """
+        Registers a socket creator into the boss. The socket creator is not
+        used directly, but through a cache. The cache is created in this
+        method.
+
+        If called more than once, it raises a ValueError.
+        """
+        if self._socket_cache is not None:
+            raise ValueError("A creator was inserted previously")
+        self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+    def init_socket_srv(self):
+        """
+        Creates and listens on a unix-domain socket to be able to send out
+        the sockets.
+
+        This method should be called after switching user, or the switched
+        applications won't be able to access the socket.
+        """
+        self._srv_socket = socket.socket(socket.AF_UNIX)
+        # We create a temporary directory somewhere safe and unique, to avoid
+        # the need to find a place ourselves or bother users. Also, this
+        # secures the socket on some platforms, as it creates a private
+        # directory.
+        self._tmpdir = tempfile.mkdtemp()
+        # Get the name
+        self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+        # And bind the socket to the name
+        self._srv_socket.bind(self._socket_path)
+        self._srv_socket.listen(5)
+
+    def remove_socket_srv(self):
+        """
+        Closes and removes the listening socket and the directory where it
+        lives, as we created both.
+
+        It does nothing if the _srv_socket is not set (e.g. it was not yet
+        initialized).
+        """
+        if self._srv_socket is not None:
+            self._srv_socket.close()
+            os.remove(self._socket_path)
+            os.rmdir(self._tmpdir)
+
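# For illustration only: how the two methods above are meant to be paired,
# mirroring what main() below does (error handling omitted; the boss and
# wakeup_fd objects are assumed to exist already):
boss.init_socket_srv()        # call after switching user
try:
    boss.run(wakeup_fd)
finally:
    boss.remove_socket_srv()  # close the socket and remove its directory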
+    def _srv_accept(self):
+        """
+        Accept a new connection on the unix-domain server socket and add it
+        to the set of sockets we watch.
+        """
+        (sock, _) = self._srv_socket.accept()
+        self._unix_sockets[sock.fileno()] = (sock, b'')
+
+    def _socket_data(self, socket_fileno):
+        """
+        This is called when a socket identified by the socket_fileno needs
+        attention. We try to read data from there. If it is closed, we remove
+        it.
+        """
+        (sock, previous) = self._unix_sockets[socket_fileno]
+        while True:
+            try:
+                data = sock.recv(1, socket.MSG_DONTWAIT)
+            except socket.error as se:
+                # These two might be different on some systems
+                if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+                    # No more data now. Oh, well, just store what we have.
+                    self._unix_sockets[socket_fileno] = (sock, previous)
+                    return
+                else:
+                    data = b'' # Pretend it got closed
+            if len(data) == 0: # The socket got to its end
+                del self._unix_sockets[socket_fileno]
+                self.socket_consumer_dead(sock)
+                sock.close()
+                return
             else:
-                logger.info(BIND10_RESURRECTING_PROCESS, proc_info.name)
-                try:
-                    proc_info.respawn()
-                    self.components[proc_info.pid] = proc_info
-                    logger.info(BIND10_RESURRECTED_PROCESS, proc_info.name, proc_info.pid)
-                except:
-                    still_dead[proc_info.pid] = proc_info
-        # remember any processes that refuse to be resurrected
-        self.dead_processes = still_dead
-        # return the time when the next process is ready to be restarted
-        return next_restart
+                if data == b"\n":
+                    # Handle this token and clear it
+                    self.socket_request_handler(previous, sock)
+                    previous = b''
+                else:
+                    previous += data
+
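# For illustration only: a minimal client-side sketch of the protocol the two
# methods above implement. The path and token would come from the get_socket
# command answer; fetch_socket() is a hypothetical helper, and the final
# recv_fd() step is the assumed counterpart of the send_fd() call used by the
# boss, not part of this patch.
import socket

def fetch_socket(path, token):
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect(path)
    # The boss reads byte by byte until a newline terminates the token.
    client.sendall(token.encode() + b"\n")
    # A status line follows: b"1\n" on success, b"0\n" when the socket is
    # unavailable (the markers the tests below check for).
    status = client.recv(2)
    if status != b"1\n":
        raise RuntimeError("socket not provided")
    # The descriptor itself then arrives as SCM_RIGHTS ancillary data and
    # would be read with a recv_fd()-style helper on client.fileno().
    return client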
+    def run(self, wakeup_fd):
+        """
+        The main loop, waiting for sockets, commands and dead processes.
+        Runs for as long as runnable is true.
+
+        The wakeup_fd descriptor is the read end of the pipe to which the
+        SIGCHLD signal handler writes.
+        """
+        ccs_fd = self.ccs.get_socket().fileno()
+        while self.runnable:
+            # clean up any processes that exited
+            self.reap_children()
+            next_restart = self.restart_processes()
+            if next_restart is None:
+                wait_time = None
+            else:
+                wait_time = max(next_restart - time.time(), 0)
+
+            # select() can raise EINTR when a signal arrives,
+            # even if they are resumable, so we have to catch
+            # the exception
+            try:
+                (rlist, wlist, xlist) = \
+                    select.select([wakeup_fd, ccs_fd,
+                                   self._srv_socket.fileno()] +
+                                   list(self._unix_sockets.keys()), [], [],
+                                  wait_time)
+            except select.error as err:
+                if err.args[0] == errno.EINTR:
+                    (rlist, wlist, xlist) = ([], [], [])
+                else:
+                    logger.fatal(BIND10_SELECT_ERROR, err)
+                    break
+
+            for fd in rlist + xlist:
+                if fd == ccs_fd:
+                    try:
+                        self.ccs.check_command()
+                    except isc.cc.session.ProtocolError:
+                        logger.fatal(BIND10_MSGQ_DISAPPEARED)
+                        self.runnable = False
+                        break
+                elif fd == wakeup_fd:
+                    os.read(wakeup_fd, 32)
+                elif fd == self._srv_socket.fileno():
+                    self._srv_accept()
+                elif fd in self._unix_sockets:
+                    self._socket_data(fd)
 
 # global variables, needed for signal handlers
 options = None
@@ -1037,64 +1159,32 @@ def main():
     # Block SIGPIPE, as we don't want it to end this process
     signal.signal(signal.SIGPIPE, signal.SIG_IGN)
 
-    # Go bob!
-    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
-                       options.config_file, options.nocache, options.verbose,
-                       setuid, username, options.cmdctl_port,
-                       options.wait_time)
-    startup_result = boss_of_bind.startup()
-    if startup_result:
-        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
-        sys.exit(1)
-    logger.info(BIND10_STARTUP_COMPLETE)
-    dump_pid(options.pid_file)
-
-    # In our main loop, we check for dead processes or messages 
-    # on the c-channel.
-    wakeup_fd = wakeup_pipe[0]
-    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
-    while boss_of_bind.runnable:
-        # clean up any processes that exited
-        boss_of_bind.reap_children()
-        # XXX: As we don't put anything into the processes to be restarted,
-        # this is really a complicated NOP. But we will try to reintroduce
-        # delayed restarts, so it stays here for now, until we find out if
-        # it's useful.
-        next_restart = boss_of_bind.restart_processes()
-        if next_restart is None:
-            wait_time = None
-        else:
-            wait_time = max(next_restart - time.time(), 0)
-
-        # select() can raise EINTR when a signal arrives, 
-        # even if they are resumable, so we have to catch
-        # the exception
-        try:
-            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [], 
-                                                  wait_time)
-        except select.error as err:
-            if err.args[0] == errno.EINTR:
-                (rlist, wlist, xlist) = ([], [], [])
-            else:
-                logger.fatal(BIND10_SELECT_ERROR, err)
-                break
-
-        for fd in rlist + xlist:
-            if fd == ccs_fd:
-                try:
-                    boss_of_bind.ccs.check_command()
-                except isc.cc.session.ProtocolError:
-                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
-                    self.runnable = False
-                    break
-            elif fd == wakeup_fd:
-                os.read(wakeup_fd, 32)
-
-    # shutdown
-    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    boss_of_bind.shutdown()
-    unlink_pid_file(options.pid_file)
-    sys.exit(0)
+    boss_of_bind = None
+    try:
+        # Go bob!
+        boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+                           options.config_file, options.nocache,
+                           options.verbose, setuid, username,
+                           options.cmdctl_port, options.wait_time)
+        startup_result = boss_of_bind.startup()
+        if startup_result:
+            logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+            sys.exit(1)
+        boss_of_bind.init_socket_srv()
+        logger.info(BIND10_STARTUP_COMPLETE)
+        dump_pid(options.pid_file)
+
+        # Let it run
+        boss_of_bind.run(wakeup_pipe[0])
+
+        # shutdown
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+        boss_of_bind.shutdown()
+    finally:
+        # Clean up the filesystem
+        unlink_pid_file(options.pid_file)
+        if boss_of_bind is not None:
+            boss_of_bind.remove_socket_srv()
+    sys.exit(boss_of_bind.exitcode)
 
 if __name__ == "__main__":
     main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 4267b70..adc9798 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -14,8 +14,8 @@
             "priority": 5,
             "kind": "dispensable"
           },
-          "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
-          "b10-xfrout": { "special": "xfrout", "kind": "dispensable" },
+          "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+          "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
           "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
           "b10-stats": { "address": "Stats", "kind": "dispensable" },
           "b10-stats-httpd": {
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index e323113..f9537fd 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,11 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+# Most of the time, we omit the "bind10_src" for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the bind10_src.
 from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import bind10_src
 
 # XXX: environment tests are currently disabled, due to the preprocessor
 #      setup that we have now complicating the environment
@@ -28,6 +32,8 @@ from isc.net.addr import IPAddr
 import time
 import isc
 import isc.log
+import isc.bind10.socket_cache
+import errno
 
 from isc.testutils.parse_args import TestOptParser, OptsError
 
@@ -97,6 +103,232 @@ class TestProcessInfo(unittest.TestCase):
         self.assertTrue(type(pi.pid) is int)
         self.assertNotEqual(pi.pid, old_pid)
 
+class TestCacheCommands(unittest.TestCase):
+    """
+    Test methods of boss related to the socket cache and socket handling.
+    """
+    def setUp(self):
+        """
+        Prepare the boss for some tests.
+
+        Also prepare some variables we need.
+        """
+        self.__boss = BoB()
+        # Fake the cache here so we can pretend it is us and hijack the
+        # calls to its methods.
+        self.__boss._socket_cache = self
+        self.__boss._socket_path = '/socket/path'
+        self.__raise_exception = None
+        self.__socket_args = {
+            "port": 53,
+            "address": "::",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # What was and wasn't called.
+        self.__drop_app_called = None
+        self.__get_socket_called = None
+        self.__send_fd_called = None
+        self.__get_token_called = None
+        self.__drop_socket_called = None
+        bind10_src.libutil_io_python.send_fd = self.__send_fd
+
+    def __send_fd(self, to, socket):
+        """
+        A replacement hook for the send_fd call in bind10_src.
+        """
+        self.__send_fd_called = (to, socket)
+
+    class FalseSocket:
+        """
+        A socket where we can fake methods we need instead of having a real
+        socket.
+        """
+        def __init__(self):
+            self.send = ""
+        def fileno(self):
+            """
+            The file number. Used for identifying the remote application.
+            """
+            return 42
+
+        def sendall(self, data):
+            """
+            Adds data to the self.send.
+            """
+            self.send += data
+
+    def drop_application(self, application):
+        """
+        Part of pretending to be the cache. Logs the parameter to
+        self.__drop_app_called.
+
+        In the case self.__raise_exception is set, the exception there
+        is raised instead.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_app_called = application
+
+    def test_consumer_dead(self):
+        """
+        Test that it calls the drop_application method of the cache.
+        """
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+        self.assertEqual(42, self.__drop_app_called)
+
+    def test_consumer_dead_invalid(self):
+        """
+        Test that the boss doesn't crash in case the application is not
+        known to the cache, as this can actually happen in practice.
+        """
+        self.__raise_exception = ValueError("This application is unknown")
+        # This doesn't crash
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+
+    def get_socket(self, token, application):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the call is logged
+        into __get_socket_called and a number is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_socket_called = (token, application)
+        return 13
+
+    def test_request_handler(self):
+        """
+        Test that a request for a socket is forwarded to the cache and the
+        socket is sent back if the cache provides one.
+        """
+        socket = self.FalseSocket()
+        # An exception from the cache
+        self.__raise_exception = ValueError("Test value error")
+        self.__boss.socket_request_handler("token", socket)
+        # It was called, but it threw, so it is not noted here
+        self.assertIsNone(self.__get_socket_called)
+        self.assertEqual("0\n", socket.send)
+        # It should not have sent any socket.
+        self.assertIsNone(self.__send_fd_called)
+        # Now prepare a valid scenario
+        self.__raise_exception = None
+        socket.send = ""
+        self.__boss.socket_request_handler("token", socket)
+        self.assertEqual("1\n", socket.send)
+        self.assertEqual((42, 13), self.__send_fd_called)
+        self.assertEqual(("token", 42), self.__get_socket_called)
+
+    def get_token(self, protocol, address, port, share_mode, share_name):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameters are
+        logged into __get_token_called and a token is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_token_called = (protocol, address, port, share_mode,
+                                   share_name)
+        return "token"
+
+    def test_get_socket_ok(self):
+        """
+        Test the successful scenario of getting a socket.
+        """
+        result = self.__boss._get_socket(self.__socket_args)
+        [code, answer] = result['result']
+        self.assertEqual(0, code)
+        self.assertEqual({
+            'token': 'token',
+            'path': '/socket/path'
+        }, answer)
+        addr = self.__get_token_called[1]
+        self.assertTrue(isinstance(addr, IPAddr))
+        self.assertEqual("::", str(addr))
+        self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+                         self.__get_token_called)
+
+    def test_get_socket_error(self):
+        """
+        Test that bad inputs and missing parameters are handled correctly.
+        """
+        def check_code(code, args):
+            """
+            Pass the args there and check if it returns success or not.
+
+            The rest is not tested, as it is already checked in the
+            test_get_socket_ok.
+            """
+            [rcode, ranswer] = self.__boss._get_socket(args)['result']
+            self.assertEqual(code, rcode)
+            if code == 1:
+                # This should be an error message. The exact formatting
+                # is unknown, but we check it is a string at least
+                self.assertTrue(isinstance(ranswer, str))
+        def mod_args(name, value):
+            """
+            Override a parameter in the args.
+            """
+            result = dict(self.__socket_args)
+            result[name] = value
+            return result
+
+        # Port too large
+        check_code(1, mod_args('port', 65536))
+        # Not numeric address
+        check_code(1, mod_args('address', 'example.org.'))
+        # Some bad values of enum-like params
+        check_code(1, mod_args('protocol', 'BAD PROTO'))
+        check_code(1, mod_args('share_mode', 'BAD SHARE'))
+        # Check missing parameters
+        for param in self.__socket_args.keys():
+            args = dict(self.__socket_args)
+            del args[param]
+            check_code(1, args)
+        # These are OK values for the enum-like parameters
+        # The ones from test_get_socket_ok are not tested here
+        check_code(0, mod_args('protocol', 'TCP'))
+        check_code(0, mod_args('share_mode', 'SAMEAPP'))
+        check_code(0, mod_args('share_mode', 'NO'))
+        # If an exception is raised from within the cache, it is converted
+        # to an error, not propagated
+        self.__raise_exception = Exception("Test exception")
+        check_code(1, self.__socket_args)
+
+    def drop_socket(self, token):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameter is stored
+        in __drop_socket_called.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_socket_called = token
+
+    def test_drop_socket(self):
+        """
+        Check the drop_socket command. It should directly call the method
+        on the cache. Exceptions should be translated to error messages.
+        """
+        # This should be OK and just propagated to the call.
+        self.assertEqual({"result": [0]},
+                         self.__boss.command_handler("drop_socket",
+                                                     {"token": "token"}))
+        self.assertEqual("token", self.__drop_socket_called)
+        self.__drop_socket_called = None
+        # Missing parameter
+        self.assertEqual({"result": [1, "Missing token parameter"]},
+                         self.__boss.command_handler("drop_socket", {}))
+        self.assertIsNone(self.__drop_socket_called)
+        # An exception is raised from within the cache
+        self.__raise_exception = ValueError("Test error")
+        self.assertEqual({"result": [1, "Test error"]},
+                         self.__boss.command_handler("drop_socket",
+                         {"token": "token"}))
+
+
 class TestBoB(unittest.TestCase):
     def test_init(self):
         bob = BoB()
@@ -105,16 +337,26 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
         self.assertEqual(bob.components, {})
-        self.assertEqual(bob.dead_processes, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
+        self.assertIsNone(bob._socket_cache)
 
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
+    def test_set_creator(self):
+        """
+        Test the call to set_creator. First time, the cache is created
+        with the passed creator. The next time, it throws an exception.
+        """
+        bob = BoB()
+        # The cache doesn't use it at start, so just create an empty class
+        class Creator: pass
+        creator = Creator()
+        bob.set_creator(creator)
+        self.assertTrue(isinstance(bob._socket_cache,
+                        isc.bind10.socket_cache.Cache))
+        self.assertEqual(creator, bob._socket_cache._creator)
+        self.assertRaises(ValueError, bob.set_creator, creator)
 
     def test_init_alternate_socket(self):
         bob = BoB("alt_socket_file")
@@ -123,15 +365,10 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.cc_session, None)
         self.assertEqual(bob.ccs, None)
         self.assertEqual(bob.components, {})
-        self.assertEqual(bob.dead_processes, {})
         self.assertEqual(bob.runnable, False)
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
-        self.assertEqual(bob.cfg_start_auth, True)
-        self.assertEqual(bob.cfg_start_resolver, False)
-        self.assertEqual(bob.cfg_start_dhcp4, False)
-        self.assertEqual(bob.cfg_start_dhcp6, False)
 
     def test_command_handler(self):
         class DummySession():
@@ -194,6 +431,26 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.command_handler("__UNKNOWN__", None),
                          isc.config.ccsession.create_answer(1, "Unknown command"))
 
+        # Fake the get_token of cache and test the command works
+        bob._socket_path = '/socket/path'
+        class cache:
+            def get_token(self, protocol, addr, port, share_mode, share_name):
+                return str(addr) + ':' + str(port)
+        bob._socket_cache = cache()
+        args = {
+            "port": 53,
+            "address": "0.0.0.0",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # The fake get_token encodes its arguments in the token, so the
+        # answer shows the cache was consulted at all and this is the
+        # easiest way to check.
+        self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+                                         'path': '/socket/path'}]},
+                         bob.command_handler("get_socket", args))
+        # The drop_socket command is not tested here, but in
+        # TestCacheCommands, where the needed cache mocks are in place.
+
 # Class for testing the BoB without actually starting processes.
 # This is used for testing the start/stop components routines and
 # the BoB commands.
@@ -279,7 +536,9 @@ class MockBob(BoB):
                     'b10-stats-httpd': self.start_stats_httpd,
                     'b10-cmdctl': self.start_cmdctl,
                     'b10-dhcp6': self.start_dhcp6,
-                    'b10-dhcp4': self.start_dhcp4 }
+                    'b10-dhcp4': self.start_dhcp4,
+                    'b10-xfrin': self.start_xfrin,
+                    'b10-xfrout': self.start_xfrout }
         return procmap[name]()
 
     def start_xfrout(self):
@@ -474,8 +733,9 @@ class TestStartStopProcessesBob(unittest.TestCase):
         if start_auth:
             config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
             config['b10-xfrout'] = { 'kind': 'dispensable',
-                                     'special': 'xfrout' }
-            config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
+                                     'address': 'Xfrout' }
+            config['b10-xfrin'] = { 'kind': 'dispensable',
+                                    'address': 'Xfrin' }
             config['b10-zonemgr'] = { 'kind': 'dispensable',
                                       'address': 'Zonemgr' }
         if start_resolver:
@@ -887,9 +1147,9 @@ class TestBossComponents(unittest.TestCase):
             (anyway it is not told so). It does not die if it is killed
             the first time. It dies only when killed forcefully.
             """
-            def kill(self, forcefull=False):
-                killed.append(forcefull)
-                if forcefull:
+            def kill(self, forceful=False):
+                killed.append(forceful)
+                if forceful:
                     bob.components = {}
             def pid(self):
                 return 1
@@ -939,6 +1199,201 @@ class TestBossComponents(unittest.TestCase):
         bob.start_all_components()
         self.__check_extended(self.__param)
 
+class SocketSrvTest(unittest.TestCase):
+    """
+    This tests some methods of boss related to the unix domain sockets used
+    to transfer other sockets to applications.
+    """
+    def setUp(self):
+        """
+        Create the boss to test, test data, and back up some functions.
+        """
+        self.__boss = BoB()
+        self.__select_backup = bind10_src.select.select
+        self.__select_called = None
+        self.__socket_data_called = None
+        self.__consumer_dead_called = None
+        self.__socket_request_handler_called = None
+
+    def tearDown(self):
+        """
+        Restore functions.
+        """
+        bind10_src.select.select = self.__select_backup
+
+    class __FalseSocket:
+        """
+        A mock socket for select, accept and similar calls.
+        """
+        def __init__(self, owner, fileno=42):
+            self.__owner = owner
+            self.__fileno = fileno
+            self.data = None
+            self.closed = False
+
+        def fileno(self):
+            return self.__fileno
+
+        def accept(self):
+            return (self.__class__(self.__owner, 13), "")
+
+        def recv(self, bufsize, flags=0):
+            self.__owner.assertEqual(1, bufsize)
+            self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+            if isinstance(self.data, socket.error):
+                raise self.data
+            elif self.data is not None:
+                if len(self.data):
+                    result = self.data[0:1]
+                    self.data = self.data[1:]
+                    return result
+                else:
+                    raise socket.error(errno.EAGAIN, "Would block")
+            else:
+                return b''
+
+        def close(self):
+            self.closed = True
+
+    class __CCS:
+        """
+        A mock CCS, just to provide the socket file number.
+        """
+        class __Socket:
+            def fileno(self):
+                return 1
+        def get_socket(self):
+            return self.__Socket()
+
+    def __select_accept(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([42], [], [])
+
+    def __select_data(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([13], [], [])
+
+    def __accept(self):
+        """
+        Hijack the _srv_accept method of the boss.
+
+        Notes that it was called and stops the boss.
+        """
+        self.__accept_called = True
+        self.__boss.runnable = False
+
+    def test_srv_accept_called(self):
+        """
+        Test that the _srv_accept method of boss is called when the listening
+        socket is readable.
+        """
+        self.__boss.runnable = True
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept = self.__accept
+        self.__boss.ccs = self.__CCS()
+        bind10_src.select.select = self.__select_accept
+        self.__boss.run(2)
+        # It called the accept
+        self.assertTrue(self.__accept_called)
+        # And the select had the right parameters
+        self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+    def test_srv_accept(self):
+        """
+        Test how the _srv_accept method works.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept()
+        # After we accepted, a new socket is added there
+        socket = self.__boss._unix_sockets[13][0]
+        # The socket is properly stored there
+        self.assertTrue(isinstance(socket, self.__FalseSocket))
+        # And the buffer (yet empty) is there
+        self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
+
+    def __socket_data(self, socket):
+        self.__boss.runnable = False
+        self.__socket_data_called = socket
+
+    def test_socket_data(self):
+        """
+        Test that a socket that wants attention gets it.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._socket_data = self.__socket_data
+        self.__boss.ccs = self.__CCS()
+        self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+        self.__boss.runnable = True
+        bind10_src.select.select = self.__select_data
+        self.__boss.run(2)
+        self.assertEqual(13, self.__socket_data_called)
+        self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+    def __prepare_data(self, data):
+        socket = self.__FalseSocket(self, 13)
+        self.__boss._unix_sockets = {13: (socket, b'')}
+        socket.data = data
+        self.__boss.socket_consumer_dead = self.__consumer_dead
+        self.__boss.socket_request_handler = self.__socket_request_handler
+        return socket
+
+    def __consumer_dead(self, socket):
+        self.__consumer_dead_called = socket
+
+    def __socket_request_handler(self, token, socket):
+        self.__socket_request_handler_called = (token, socket)
+
+    def test_socket_closed(self):
+        """
+        Test that a socket is removed and the socket_consumer_dead is called
+        when it is closed.
+        """
+        socket = self.__prepare_data(None)
+        self.__boss._socket_data(13)
+        self.assertEqual(socket, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(socket.closed)
+
+    def test_socket_short(self):
+        """
+        Test that if there's not enough data to get the whole token, the
+        partial data is kept, but nothing is called.
+        """
+        socket = self.__prepare_data(b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertIsNone(self.__socket_request_handler_called)
+
+    def test_socket_continue(self):
+        """
+        Test that we call the token handling function when the whole token
+        comes. This test pretends to continue reading where the previous one
+        stopped.
+        """
+        socket = self.__prepare_data(b"en\nanothe")
+        # The data to finish
+        self.__boss._unix_sockets[13] = (socket, b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertEqual((b'token', socket),
+                         self.__socket_request_handler_called)
+
+    def test_broken_socket(self):
+        """
+        If the socket raises an exception other than EAGAIN during the read,
+        it is broken and we remove it.
+        """
+        sock = self.__prepare_data(socket.error(errno.ENOMEM,
+            "There's more memory available, but not for you"))
+        self.__boss._socket_data(13)
+        self.assertEqual(sock, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(sock.closed)
+
 if __name__ == '__main__':
     # store os.environ for test_unchanged_environment
     original_os_environ = copy.deepcopy(os.environ)
diff --git a/src/bin/dhcp6/.gitignore b/src/bin/dhcp6/.gitignore
index 6a6060b..e4e8f2d 100644
--- a/src/bin/dhcp6/.gitignore
+++ b/src/bin/dhcp6/.gitignore
@@ -7,3 +7,4 @@ Makefile.in
 b10-dhcp6
 spec_config.h
 spec_config.h.pre
+tests/dhcp6_unittests
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index ba5afec..d5a969f 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -12,26 +12,32 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include "dhcp/dhcp6.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
-#include "dhcp6/dhcp6_srv.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
-#include "asiolink/io_address.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp6/dhcp6_srv.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+#include <asiolink/io_address.h>
+#include <exceptions/exceptions.h>
 
 using namespace std;
 using namespace isc;
 using namespace isc::dhcp;
 using namespace isc::asiolink;
 
-Dhcpv6Srv::Dhcpv6Srv() {
+Dhcpv6Srv::Dhcpv6Srv(uint16_t port) {
+
     cout << "Initialization" << endl;
 
-    // first call to instance() will create IfaceMgr (it's a singleton)
-    // it may throw something if things go wrong
+    // First call to instance() will create IfaceMgr (it's a singleton).
+    // It may throw something if things go wrong.
     IfaceMgr::instance();
 
+    // Now try to open IPv6 sockets on detected interfaces.
+    IfaceMgr::instance().openSockets(port);
+
     /// @todo: instantiate LeaseMgr here once it is implemented.
 
     setServerID();
@@ -41,6 +47,8 @@ Dhcpv6Srv::Dhcpv6Srv() {
 
 Dhcpv6Srv::~Dhcpv6Srv() {
     cout << "DHCPv6 Srv shutdown." << endl;
+
+    IfaceMgr::instance().closeSockets();
 }
 
 bool
@@ -49,7 +57,7 @@ Dhcpv6Srv::run() {
         boost::shared_ptr<Pkt6> query; // client's message
         boost::shared_ptr<Pkt6> rsp;   // server's response
 
-        query = IfaceMgr::instance().receive();
+        query = IfaceMgr::instance().receive6();
 
         if (query) {
             if (!query->unpack()) {
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 4daef3a..bcc7818 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -17,8 +17,9 @@
 
 #include <boost/shared_ptr.hpp>
 #include <boost/noncopyable.hpp>
-#include "dhcp/pkt6.h"
-#include "dhcp/option.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/option.h>
 #include <iostream>
 
 namespace isc {
@@ -41,10 +42,12 @@ public:
     /// In particular, creates IfaceMgr that will be responsible for
     /// network interaction. Will instantiate lease manager, and load
     /// old or create new DUID.
-    Dhcpv6Srv();
+    ///
+    /// @param port port on which all sockets will listen
+    Dhcpv6Srv(uint16_t port = DHCP6_SERVER_PORT);
 
     /// @brief Destructor. Used during DHCPv6 service shutdown.
-    ~Dhcpv6Srv();
+    virtual ~Dhcpv6Srv();
 
     /// @brief Returns server-identifier option
     ///
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
index a96db07..60dac63 100644
--- a/src/bin/dhcp6/iface_mgr.cc
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -18,9 +18,9 @@
 #include <netinet/in.h>
 #include <arpa/inet.h>
 
-#include "dhcp/dhcp6.h"
-#include "dhcp6/iface_mgr.h"
-#include "exceptions/exceptions.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp6/iface_mgr.h>
+#include <exceptions/exceptions.h>
 
 using namespace std;
 using namespace isc;
@@ -79,6 +79,30 @@ IfaceMgr::Iface::getPlainMac() const {
     return (tmp.str());
 }
 
+bool IfaceMgr::Iface::delAddress(const isc::asiolink::IOAddress& addr) {
+
+    // Let's delete all addresses that match. It really shouldn't matter
+    // whether we delete the first match or all of them, as the OS should
+    // allow a given address to be added to an interface only once. If the
+    // OS allows multiple instances of the same address, we are in deep
+    // trouble anyway.
+    size_t size = addrs_.size();
+    addrs_.erase(remove(addrs_.begin(), addrs_.end(), addr), addrs_.end());
+    return (addrs_.size() < size);
+}
+
+bool IfaceMgr::Iface::delSocket(uint16_t sockfd) {
+    list<SocketInfo>::iterator sock = sockets_.begin();
+    while (sock!=sockets_.end()) {
+        if (sock->sockfd_ == sockfd) {
+            close(sockfd);
+            sockets_.erase(sock);
+            return (true); //socket found
+        }
+        ++sock;
+    }
+    return (false); // socket not found
+}
+
 IfaceMgr::IfaceMgr()
     :control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
      control_buf_(new char[control_buf_len_])
@@ -95,9 +119,6 @@ IfaceMgr::IfaceMgr()
 
         detectIfaces();
 
-        if (!openSockets()) {
-            isc_throw(Unexpected, "Failed to open/bind sockets.");
-        }
     } catch (const std::exception& ex) {
         cout << "IfaceMgr creation failed:" << ex.what() << endl;
 
@@ -109,7 +130,23 @@ IfaceMgr::IfaceMgr()
     }
 }
 
+void IfaceMgr::closeSockets() {
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+
+        for (SocketCollection::iterator sock = iface->sockets_.begin();
+             sock != iface->sockets_.end(); ++sock) {
+            cout << "Closing socket " << sock->sockfd_ << endl;
+            close(sock->sockfd_);
+        }
+        iface->sockets_.clear();
+    }
+
+}
+
 IfaceMgr::~IfaceMgr() {
+    closeSockets();
+
     // control_buf_ is deleted automatically (scoped_ptr)
     control_buf_len_ = 0;
 }
@@ -139,8 +176,8 @@ IfaceMgr::detectIfaces() {
 
         Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
         IOAddress addr(linkLocal);
-        iface.addrs_.push_back(addr);
-        ifaces_.push_back(iface);
+        iface.addAddress(addr);
+        addInterface(iface);
         interfaces.close();
     } catch (const std::exception& ex) {
         // TODO: deallocate whatever memory we used
@@ -154,51 +191,55 @@ IfaceMgr::detectIfaces() {
     }
 }
 
-bool
-IfaceMgr::openSockets() {
-    int sock;
+void
+IfaceMgr::openSockets(uint16_t port) {
+    int sock1, sock2;
+
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
 
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
+        AddressCollection addrs = iface->getAddresses();
 
-        for (Addr6Lst::iterator addr=iface->addrs_.begin();
-             addr!=iface->addrs_.end();
+        for (AddressCollection::iterator addr = addrs.begin();
+             addr != addrs.end();
              ++addr) {
 
-            sock = openSocket(iface->name_, *addr,
-                              DHCP6_SERVER_PORT);
-            if (sock<0) {
-                cout << "Failed to open unicast socket." << endl;
-                return (false);
+            sock1 = openSocket(iface->getName(), *addr, port);
+            if (sock1 < 0) {
+                isc_throw(Unexpected, "Failed to open unicast socket on "
+                          << " interface " << iface->getFullName());
             }
-            sendsock_ = sock;
-
-            sock = openSocket(iface->name_,
-                              IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
-                              DHCP6_SERVER_PORT);
-            if (sock<0) {
-                cout << "Failed to open multicast socket." << endl;
-                close(sendsock_);
-                return (false);
+
+            if ( !joinMcast(sock1, iface->getName(),
+                             string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+                close(sock1);
+                isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+                          << " multicast group.");
+            }
+
+            // this doesn't work too well on NetBSD
+            sock2 = openSocket(iface->getName(),
+                               IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+                               port);
+            if (sock2 < 0) {
+                iface->delSocket(sock1); // delete the unicast socket opened above
+                isc_throw(Unexpected, "Failed to open multicast socket on "
+                          << "interface " << iface->getFullName());
+            }
             }
-            recvsock_ = sock;
         }
     }
-
-    return (true);
 }
 
 void
 IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
-    for (IfaceLst::const_iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
+    for (IfaceCollection::const_iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
         out << "Detected interface " << iface->getFullName() << endl;
-        out << "  " << iface->addrs_.size() << " addr(s):" << endl;
-        for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
-             addr != iface->addrs_.end();
-             ++addr) {
+        out << "  " << iface->getAddresses().size() << " addr(s):" << endl;
+        const AddressCollection addrs = iface->getAddresses();
+
+        for (AddressCollection::const_iterator addr = addrs.begin();
+             addr != addrs.end(); ++addr) {
             out << "  " << addr->toText() << endl;
         }
         out << "  mac: " << iface->getPlainMac() << endl;
@@ -207,11 +248,11 @@ IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
 
 IfaceMgr::Iface*
 IfaceMgr::getIface(int ifindex) {
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
-        if (iface->ifindex_ == ifindex)
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+        if (iface->getIndex() == ifindex) {
             return (&(*iface));
+        }
     }
 
     return (NULL); // not found
@@ -219,29 +260,87 @@ IfaceMgr::getIface(int ifindex) {
 
 IfaceMgr::Iface*
 IfaceMgr::getIface(const std::string& ifname) {
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
-        if (iface->name_ == ifname)
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+        if (iface->getName() == ifname) {
             return (&(*iface));
+        }
     }
 
     return (NULL); // not found
 }
 
 int
-IfaceMgr::openSocket(const std::string& ifname,
-                     const IOAddress& addr,
+IfaceMgr::openSocket(const std::string& ifname, const IOAddress& addr,
                      int port) {
-    struct sockaddr_in6 addr6;
+    Iface* iface = getIface(ifname);
+    if (!iface) {
+        isc_throw(BadValue, "There is no " << ifname << " interface present.");
+    }
+    switch (addr.getFamily()) {
+    case AF_INET:
+        return openSocket4(*iface, addr, port);
+    case AF_INET6:
+        return openSocket6(*iface, addr, port);
+    default:
+        isc_throw(BadValue, "Failed to detect family of address: "
+                  << addr.toText());
+    }
+}
+
+int
+IfaceMgr::openSocket4(Iface& iface, const IOAddress& addr, int port) {
+
+    cout << "Creating UDP4 socket on " << iface.getFullName()
+         << " " << addr.toText() << "/port=" << port << endl;
+
+    struct sockaddr_in addr4;
+    memset(&addr4, 0, sizeof(sockaddr));
+    addr4.sin_family = AF_INET;
+    addr4.sin_port = htons(port);
+    memcpy(&addr4.sin_addr, addr.getAddress().to_v4().to_bytes().data(),
+           sizeof(addr4.sin_addr));
+
+    int sock = socket(AF_INET, SOCK_DGRAM, 0);
+    if (sock < 0) {
+        isc_throw(Unexpected, "Failed to create UDP6 socket.");
+    }
+
+    if (bind(sock, (struct sockaddr *)&addr4, sizeof(addr4)) < 0) {
+        close(sock);
+        isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+                  << "/port=" << port);
+    }
+
+    // If there is no support for IP_PKTINFO, we are really out of luck.
+    // It will be difficult to understand where this packet came from.
+#if defined(IP_PKTINFO)
+    int flag = 1;
+    if (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &flag, sizeof(flag)) != 0) {
+        close(sock);
+        isc_throw(Unexpected, "setsockopt: IP_PKTINFO: failed.");
+    }
+#endif
+
+    cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+        addr.toText() << "/port=" << port << endl;
 
-    cout << "Creating socket on " << ifname << "/" << addr.toText()
-         << "/port=" << port << endl;
+    iface.addSocket(SocketInfo(sock, addr, port));
 
+    return (sock);
+}
+
+int
+IfaceMgr::openSocket6(Iface& iface, const IOAddress& addr, int port) {
+
+    cout << "Creating UDP6 socket on " << iface.getFullName()
+         << " " << addr.toText() << "/port=" << port << endl;
+
+    struct sockaddr_in6 addr6;
     memset(&addr6, 0, sizeof(addr6));
     addr6.sin6_family = AF_INET6;
     addr6.sin6_port = htons(port);
-    addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+    addr6.sin6_scope_id = if_nametoindex(iface.getName().c_str());
 
     memcpy(&addr6.sin6_addr,
            addr.getAddress().to_v6().to_bytes().data(),
@@ -255,61 +354,58 @@ IfaceMgr::openSocket(const std::string& ifname,
     // make a socket
     int sock = socket(AF_INET6, SOCK_DGRAM, 0);
     if (sock < 0) {
-        cout << "Failed to create UDP6 socket." << endl;
-        return (-1);
+        isc_throw(Unexpected, "Failed to create UDP6 socket.");
     }
 
-    /* Set the REUSEADDR option so that we don't fail to start if
-       we're being restarted. */
+    // Set the REUSEADDR option so that we don't fail to start if
+    // we're being restarted.
     int flag = 1;
     if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                    (char *)&flag, sizeof(flag)) < 0) {
-        cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "Can't set SO_REUSEADDR option on dhcpv6 socket.");
     }
 
     if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
-        cout << "Failed to bind socket " << sock << " to " << addr.toText()
-             << "/port=" << port << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+                  << "/port=" << port);
     }
 #ifdef IPV6_RECVPKTINFO
-    /* RFC3542 - a new way */
+    // RFC3542 - a new way
     if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
                    &flag, sizeof(flag)) != 0) {
-        cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "setsockopt: IPV6_RECVPKTINFO failed.");
     }
 #else
-    /* RFC2292 - an old way */
+    // RFC2292 - an old way
     if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
                    &flag, sizeof(flag)) != 0) {
-        cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "setsockopt: IPV6_PKTINFO: failed.");
     }
 #endif
 
     // multicast stuff
-
     if (addr.getAddress().to_v6().is_multicast()) {
         // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
         // are link and site-scoped, so there is no sense to join those groups
         // with global addresses.
 
-        if ( !joinMcast( sock, ifname,
+        if ( !joinMcast( sock, iface.getName(),
                          string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
             close(sock);
-            return (-1);
+            isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+                      << " multicast group.");
         }
     }
 
-    cout << "Created socket " << sock << " on " << ifname << "/" <<
+    cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
         addr.toText() << "/port=" << port << endl;
 
+    iface.addSocket(SocketInfo(sock, addr, port));
+
     return (sock);
 }
 
@@ -345,16 +441,19 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     int result;
     struct in6_pktinfo *pktinfo;
     struct cmsghdr *cmsg;
+
+    Iface* iface = getIface(pkt->iface_);
+    if (!iface) {
+        isc_throw(BadValue, "Unable to send Pkt6. Invalid interface ("
+                  << pkt->iface_ << ") specified.");
+    }
+
     memset(&control_buf_[0], 0, control_buf_len_);
 
-    /*
-     * Initialize our message header structure.
-     */
+    // Initialize our message header structure.
     memset(&m, 0, sizeof(m));
 
-    /*
-     * Set the target address we're sending to.
-     */
+    // Set the target address we're sending to.
     sockaddr_in6 to;
     memset(&to, 0, sizeof(to));
     to.sin6_family = AF_INET6;
@@ -367,24 +466,20 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     m.msg_name = &to;
     m.msg_namelen = sizeof(to);
 
-    /*
-     * Set the data buffer we're sending. (Using this wacky
-     * "scatter-gather" stuff... we only have a single chunk
-     * of data to send, so we declare a single vector entry.)
-     */
+    // Set the data buffer we're sending. (Using this wacky
+    // "scatter-gather" stuff... we only have a single chunk
+    // of data to send, so we declare a single vector entry.)
     v.iov_base = (char *) &pkt->data_[0];
     v.iov_len = pkt->data_len_;
     m.msg_iov = &v;
     m.msg_iovlen = 1;
 
-    /*
-     * Setting the interface is a bit more involved.
-     *
-     * We have to create a "control message", and set that to
-     * define the IPv6 packet information. We could set the
-     * source address if we wanted, but we can safely let the
-     * kernel decide what that should be.
-     */
+    // Setting the interface is a bit more involved.
+    //
+    // We have to create a "control message", and set that to
+    // define the IPv6 packet information. We could set the
+    // source address if we wanted, but we can safely let the
+    // kernel decide what that should be.
     m.msg_control = &control_buf_[0];
     m.msg_controllen = control_buf_len_;
     cmsg = CMSG_FIRSTHDR(&m);
@@ -396,14 +491,12 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     pktinfo->ipi6_ifindex = pkt->ifindex_;
     m.msg_controllen = cmsg->cmsg_len;
 
-    result = sendmsg(sendsock_, &m, 0);
+    result = sendmsg(getSocket(*pkt), &m, 0);
     if (result < 0) {
         cout << "Send packet failed." << endl;
     }
-    cout << "Sent " << result << " bytes." << endl;
-
-    cout << "Sent " << pkt->data_len_ << " bytes over "
-         << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+    cout << "Sent " << pkt->data_len_ << " bytes over socket " << getSocket(*pkt)
+         << " on " << iface->getFullName() << " interface: "
          << " dst=" << pkt->remote_addr_.toText()
          << ", src=" << pkt->local_addr_.toText()
          << endl;
@@ -411,8 +504,24 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     return (result);
 }
 
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt4>& )
+{
+    /// TODO: Implement this (ticket #1240)
+    isc_throw(NotImplemented, "Pkt4 send not implemented yet.");
+}
+
+
+boost::shared_ptr<Pkt4>
+IfaceMgr::receive4() {
+    isc_throw(NotImplemented, "Pkt4 reception not implemented yet.");
+
+    // TODO: To be implemented (ticket #1239)
+    return (boost::shared_ptr<Pkt4>()); // NULL
+}
+
 boost::shared_ptr<Pkt6>
-IfaceMgr::receive() {
+IfaceMgr::receive6() {
     struct msghdr m;
     struct iovec v;
     int result;
@@ -442,49 +551,66 @@ IfaceMgr::receive() {
     memset(&from, 0, sizeof(from));
     memset(&to_addr, 0, sizeof(to_addr));
 
-    /*
-     * Initialize our message header structure.
-     */
+    // Initialize our message header structure.
     memset(&m, 0, sizeof(m));
 
-    /*
-     * Point so we can get the from address.
-     */
+    // Point so we can get the from address.
     m.msg_name = &from;
     m.msg_namelen = sizeof(from);
 
-    /*
-     * Set the data buffer we're receiving. (Using this wacky
-     * "scatter-gather" stuff... but we that doesn't really make
-     * sense for us, so we use a single vector entry.)
-     */
+    // Set the data buffer we're receiving. (Using this wacky
+    // "scatter-gather" stuff... but we that doesn't really make
+    // sense for us, so we use a single vector entry.)
     v.iov_base = (void*)&pkt->data_[0];
     v.iov_len = pkt->data_len_;
     m.msg_iov = &v;
     m.msg_iovlen = 1;
 
-    /*
-     * Getting the interface is a bit more involved.
-     *
-     * We set up some space for a "control message". We have
-     * previously asked the kernel to give us packet
-     * information (when we initialized the interface), so we
-     * should get the destination address from that.
-     */
+    // Getting the interface is a bit more involved.
+    //
+    // We set up some space for a "control message". We have
+    // previously asked the kernel to give us packet
+    // information (when we initialized the interface), so we
+    // should get the destination address from that.
     m.msg_control = &control_buf_[0];
     m.msg_controllen = control_buf_len_;
 
-    result = recvmsg(recvsock_, &m, 0);
+    /// TODO: Need to move to select() and poll over
+    /// all available sockets. For now, we just take the
+    /// first interface and use the first socket from it.
+    IfaceCollection::const_iterator iface = ifaces_.begin();
+    if (iface == ifaces_.end()) {
+        isc_throw(Unexpected, "No interfaces detected. Can't receive anything.");
+    }
+    SocketCollection::const_iterator s = iface->sockets_.begin();
+    const SocketInfo* candidate = 0;
+    while (s != iface->sockets_.end()) {
+        if (s->addr_.getAddress().to_v6().is_multicast()) {
+            candidate = &(*s);
+            break;
+        }
+        if (!candidate) {
+            candidate = &(*s); // it's not multicast, but it's better than none
+        }
+        ++s;
+    }
+    if (!candidate) {
+        isc_throw(Unexpected, "Interface " << iface->getFullName()
+                  << " does not have any sockets open.");
+    }
+
+    cout << "Trying to receive over socket " << candidate->sockfd_ << " bound to "
+         << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+         << iface->getFullName() << endl;
+    result = recvmsg(candidate->sockfd_, &m, 0);
 
     if (result >= 0) {
-        /*
-         * If we did read successfully, then we need to loop
-         * through the control messages we received and
-         * find the one with our destination address.
-         *
-         * We also keep a flag to see if we found it. If we
-         * didn't, then we consider this to be an error.
-         */
+        // If we did read successfully, then we need to loop
+        // through the control messages we received and
+        // find the one with our destination address.
+        //
+        // We also keep a flag to see if we found it. If we
+        // didn't, then we consider this to be an error.
         int found_pktinfo = 0;
         cmsg = CMSG_FIRSTHDR(&m);
         while (cmsg != NULL) {
@@ -520,7 +646,7 @@ IfaceMgr::receive() {
 
     Iface* received = getIface(pkt->ifindex_);
     if (received) {
-        pkt->iface_ = received->name_;
+        pkt->iface_ = received->getName();
     } else {
         cout << "Received packet over unknown interface (ifindex="
              << pkt->ifindex_ << ")." << endl;
@@ -539,4 +665,60 @@ IfaceMgr::receive() {
     return (pkt);
 }
 
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt6 const& pkt) {
+    Iface* iface = getIface(pkt.iface_);
+    if (!iface) {
+        isc_throw(BadValue, "Tried to find socket for non-existent interface "
+                  << pkt.iface_);
+    }
+
+    SocketCollection::const_iterator s;
+    for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+        if (s->family_ != AF_INET6) {
+            // don't use IPv4 sockets
+            continue;
+        }
+        if (s->addr_.getAddress().to_v6().is_multicast()) {
+            // don't use IPv6 sockets bound to multicast address
+            continue;
+        }
+        /// TODO: Add more checks here later. If remote address is
+        /// not link-local, we can't use link local bound socket
+        /// to send data.
+
+        return (s->sockfd_);
+    }
+
+    isc_throw(Unexpected, "Interface " << iface->getFullName()
+              << " does not have any suitable IPv6 sockets open.");
+}
+
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt4 const& pkt) {
+    Iface* iface = getIface(pkt.getIface());
+    if (!iface) {
+        isc_throw(BadValue, "Tried to find socket for non-existent interface "
+                  << pkt.getIface());
+    }
+
+    SocketCollection::const_iterator s;
+    for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+        if (s->family_ != AF_INET) {
+            // skip non-IPv4 sockets
+            continue;
+        }
+        /// TODO: Add more checks here later. If remote address is
+        /// not link-local, we can't use link local bound socket
+        /// to send data.
+
+        return (s->sockfd_);
+    }
+
+    isc_throw(Unexpected, "Interface " << iface->getFullName()
+              << " does not have any suitable IPv4 sockets open.");
+}
+
+
+
 }
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
index 249c7ef..0aa2592 100644
--- a/src/bin/dhcp6/iface_mgr.h
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -19,8 +19,9 @@
 #include <boost/shared_ptr.hpp>
 #include <boost/scoped_array.hpp>
 #include <boost/noncopyable.hpp>
-#include "asiolink/io_address.h"
-#include "dhcp/pkt6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/pkt6.h>
 
 namespace isc {
 
@@ -34,26 +35,119 @@ namespace dhcp {
 class IfaceMgr : public boost::noncopyable {
 public:
     /// type that defines list of addresses
-    typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+    typedef std::vector<isc::asiolink::IOAddress> AddressCollection;
 
     /// maximum MAC address length (Infiniband uses 20 bytes)
     static const unsigned int MAX_MAC_LEN = 20;
 
+    /// Holds information about socket.
+    struct SocketInfo {
+        uint16_t sockfd_; /// socket descriptor
+        isc::asiolink::IOAddress addr_; /// bound address
+        uint16_t port_;   /// socket port
+        uint16_t family_; /// IPv4 or IPv6
+
+        /// @brief SocketInfo constructor.
+        ///
+        /// @param sockfd socket descriptor
+        /// @param addr an address the socket is bound to
+        /// @param port a port the socket is bound to
+        SocketInfo(uint16_t sockfd, const isc::asiolink::IOAddress& addr,
+                   uint16_t port)
+        :sockfd_(sockfd), addr_(addr), port_(port), family_(addr.getFamily()) { }
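+
+        /// A purely illustrative sketch (values are arbitrary): the family_
+        /// member is derived from the supplied address, e.g.
+        /// @code
+        ///     SocketInfo info(9, isc::asiolink::IOAddress("2001:db8:1::56"), 547);
+        ///     // info.family_ is AF_INET6 here
+        /// @endcode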
+    };
+
+    /// type that holds a list of socket information structures
+    typedef std::list<SocketInfo> SocketCollection;
+
     /// @brief represents a single network interface
     ///
     /// Iface structure represents network interface with all useful
     /// information, like name, interface index, MAC address and
     /// list of assigned addresses
-    struct Iface {
-        /// constructor
+    class Iface {
+    public:
+        /// @brief Iface constructor.
+        ///
+        /// Creates Iface object that represents network interface.
+        ///
+        /// @param name name of the interface
+        /// @param ifindex interface index (unique integer identifier)
         Iface(const std::string& name, int ifindex);
 
-        /// returns full interface name in format ifname/ifindex
+        /// @brief Returns full interface name as "ifname/ifindex" string.
+        ///
+        /// @return string with interface name
         std::string getFullName() const;
 
-        /// returns link-layer address a plain text
+        /// @brief Returns link-layer address as plain text.
+        ///
+        /// @return MAC address as plain text (string)
         std::string getPlainMac() const;
 
+        /// @brief Returns interface index.
+        ///
+        /// @return interface index
+        uint16_t getIndex() const { return ifindex_; }
+
+        /// @brief Returns interface name.
+        ///
+        /// @return interface name
+        std::string getName() const { return name_; };
+
+        /// @brief Returns all addresses available on an interface.
+        ///
+        /// Care should be taken to not use this collection after Iface object
+        /// ceases to exist. That is easy in most cases as Iface objects are
+        /// created by IfaceMgr, which is a singleton and is expected to be
+        /// available at all times. We may revisit this if we ever decide to
+        /// implement dynamic interface detection, but such a fancy feature
+        /// would mostly be useful for clients with wifi/vpn/virtual interfaces.
+        ///
+        /// @return collection of addresses
+        const AddressCollection& getAddresses() const { return addrs_; }
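+
+        /// A short usage sketch (assumes an existing Iface object "iface";
+        /// the collection is copied, as the unit tests do):
+        /// @code
+        ///     IfaceMgr::AddressCollection addrs = iface.getAddresses();
+        ///     for (size_t i = 0; i < addrs.size(); ++i) {
+        ///         std::cout << addrs[i].toText() << std::endl;
+        ///     }
+        /// @endcode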
+
+        /// @brief Adds an address to an interface.
+        ///
+        /// This only adds an address to collection, it does not physically
+        /// configure address on actual network interface.
+        ///
+        /// @param addr address to be added
+        void addAddress(const isc::asiolink::IOAddress& addr) {
+            addrs_.push_back(addr);
+        }
+
+        /// @brief Deletes an address from an interface.
+        ///
+        /// This only deletes address from collection, it does not physically
+        /// remove address configuration from actual network interface.
+        ///
+        /// @param addr address to be removed.
+        ///
+        /// @return true if removal was successful (address was in collection),
+        ///         false otherwise
+        bool delAddress(const isc::asiolink::IOAddress& addr);
+
+        /// @brief Adds socket descriptor to an interface.
+        ///
+        /// @param sock SocketInfo structure that describes the socket.
+        void addSocket(const SocketInfo& sock)
+            { sockets_.push_back(sock); }
+
+        /// @brief Closes socket.
+        ///
+        /// Closes socket and removes corresponding SocketInfo structure
+        /// from an interface.
+        ///
+        /// @param sockfd socket descriptor to be closed/removed.
+        /// @return true if there was such socket, false otherwise
+        bool delSocket(uint16_t sockfd);
+
+        /// collection of sockets open on this interface
+        /// TODO: this should be protected
+        SocketCollection sockets_;
+
+    protected:
         /// network interface name
         std::string name_;
 
@@ -61,19 +155,13 @@ public:
         int ifindex_;
 
         /// list of assigned addresses
-        Addr6Lst addrs_;
+        AddressCollection addrs_;
 
         /// link-layer address
         uint8_t mac_[MAX_MAC_LEN];
 
         /// length of link-layer address (usually 6)
         int mac_len_;
-
-        /// socket used to sending data
-        int sendsock_;
-
-        /// socket used for receiving data
-        int recvsock_;
     };
 
     // TODO performance improvement: we may change this into
@@ -81,7 +169,7 @@ public:
     //      also hide it (make it public make tests easier for now)
 
     /// type that holds a list of interfaces
-    typedef std::list<Iface> IfaceLst;
+    typedef std::list<Iface> IfaceCollection;
 
     /// IfaceMgr is a singleton class. This method returns reference
     /// to its sole instance.
@@ -109,27 +197,63 @@ public:
     Iface*
     getIface(const std::string& ifname);
 
+    /// @brief Return most suitable socket for transmitting specified IPv6 packet.
+    ///
+    /// This method takes Pkt6 (see overloaded implementation that takes
+    /// Pkt4) and chooses appropriate socket to send it. This method
+    /// may throw BadValue if specified packet does not have outbound
+    /// interface specified, no such interface exists, or specified
+    /// interface does not have any appropriate sockets open.
+    ///
+    /// @param pkt a packet to be transmitted
+    ///
+    /// @return a socket descriptor
+    uint16_t getSocket(const isc::dhcp::Pkt6& pkt);
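+
+    /// A minimal usage sketch (assumes a suitable IPv6 unicast socket is
+    /// already open on the packet's interface; "eth0" is illustrative):
+    /// @code
+    ///     Pkt6 pkt(128);
+    ///     pkt.iface_ = "eth0";
+    ///     uint16_t sockfd = IfaceMgr::instance().getSocket(pkt);
+    /// @endcode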
+
+    /// @brief Return most suitable socket for transmitting specified IPv4 packet.
+    ///
+    /// This method takes Pkt4 (see overloaded implementation that takes
+    /// Pkt6) and chooses appropriate socket to send it. This method
+    /// may throw BadValue if specified packet does not have outbound
+    /// interface specified, no such interface exists, or specified
+    /// interface does not have any appropriate sockets open.
+    ///
+    /// @param pkt a packet to be transmitted
+    ///
+    /// @return a socket descriptor
+    uint16_t getSocket(const isc::dhcp::Pkt4& pkt);
+
     /// debugging method that prints out all available interfaces
     ///
     /// @param out specifies stream to print list of interfaces to
     void
     printIfaces(std::ostream& out = std::cout);
 
-    /// @brief Sends a packet.
+    /// @brief Sends an IPv6 packet.
     ///
-    /// Sends a packet. All parameters for actual transmission are specified in
+    /// Sends an IPv6 packet. All parameters for actual transmission are specified in
     /// Pkt6 structure itself. That includes destination address, src/dst port
     /// and interface over which data will be sent.
     ///
     /// @param pkt packet to be sent
     ///
     /// @return true if sending was successful
-    bool
-    send(boost::shared_ptr<Pkt6>& pkt);
+    bool send(boost::shared_ptr<Pkt6>& pkt);
 
-    /// @brief Tries to receive packet over open sockets.
+    /// @brief Sends an IPv4 packet.
     ///
-    /// Attempts to receive a single packet of any of the open sockets.
+    /// Sends an IPv4 packet. All parameters for actual transmission are specified
+    /// in Pkt4 structure itself. That includes destination address, src/dst
+    /// port and interface over which data will be sent.
+    ///
+    /// @param pkt a packet to be sent
+    ///
+    /// @return true if sending was successful
+    bool send(boost::shared_ptr<Pkt4>& pkt);
+
+    /// @brief Tries to receive IPv6 packet over open IPv6 sockets.
+    ///
+    /// Attempts to receive a single IPv6 packet over any of the open IPv6 sockets.
     /// If reception is successful and all information about its sender
     /// are obtained, Pkt6 object is created and returned.
     ///
@@ -138,7 +262,49 @@ public:
     /// (e.g. remove expired leases)
     ///
     /// @return Pkt6 object representing received packet (or NULL)
-    boost::shared_ptr<Pkt6> receive();
+    boost::shared_ptr<Pkt6> receive6();
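+
+    /// A minimal usage sketch (assumes sockets were opened beforehand, e.g.
+    /// via openSockets(); error handling omitted):
+    /// @code
+    ///     boost::shared_ptr<Pkt6> query = IfaceMgr::instance().receive6();
+    ///     if (query) {
+    ///         // process the received packet here
+    ///     }
+    /// @endcode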
+
+    /// @brief Tries to receive IPv4 packet over open IPv4 sockets.
+    ///
+    /// Attempts to receive a single IPv4 packet over any of the open IPv4 sockets.
+    /// If reception is successful and all information about its sender
+    /// are obtained, Pkt4 object is created and returned.
+    ///
+    /// TODO Start using select() and add timeout to be able
+    /// to not wait infinitely, but rather do something useful
+    /// (e.g. remove expired leases)
+    ///
+    /// @return Pkt4 object representing received packet (or NULL)
+    boost::shared_ptr<Pkt4> receive4();
+
+    /// Opens UDP/IP socket and binds it to address, interface and port.
+    ///
+    /// Specific type of socket (UDP/IPv4 or UDP/IPv6) depends on the family
+    /// of the passed address.
+    ///
+    /// @param ifname name of the interface
+    /// @param addr address to be bound.
+    /// @param port UDP port.
+    ///
+    /// Method will throw if socket creation, socket binding or multicast
+    /// join fails.
+    ///
+    /// @return socket descriptor, if socket creation, binding and multicast
+    /// group join were all successful.
+    int openSocket(const std::string& ifname,
+                   const isc::asiolink::IOAddress& addr, int port);
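+
+    /// A usage sketch (interface name, address and port are illustrative only):
+    /// @code
+    ///     int sock = IfaceMgr::instance().openSocket(
+    ///         "eth0", isc::asiolink::IOAddress("fe80::1234"), 10547);
+    /// @endcode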
+
+    /// Opens IPv6 sockets on detected interfaces.
+    ///
+    /// Will throw exception if socket creation fails.
+    ///
+    /// @param port specifies port number (usually DHCP6_SERVER_PORT)
+    void openSockets(uint16_t port);
+
+
+    /// @brief Closes all open sockets.
+    /// It is used in the destructor, but also from the Dhcpv4_srv and Dhcpv6_srv classes.
+    void closeSockets();
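+
+    /// A typical lifetime sketch (DHCP6_SERVER_PORT is assumed to come from
+    /// the DHCPv6 headers):
+    /// @code
+    ///     IfaceMgr::instance().openSockets(DHCP6_SERVER_PORT);
+    ///     // ... send and receive packets ...
+    ///     IfaceMgr::instance().closeSockets();
+    /// @endcode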
 
     // don't use private, we need derived classes in tests
 protected:
@@ -146,11 +312,44 @@ protected:
     /// @brief Protected constructor.
     ///
     /// Protected constructor. This is a singleton class. We don't want
-    /// anyone to create instances of IfaceMgr. Use instance() method
+    /// anyone to create instances of IfaceMgr. Use instance() method instead.
     IfaceMgr();
 
     ~IfaceMgr();
 
+    /// @brief Opens IPv4 socket.
+    ///
+    /// Please do not use this method directly. Use openSocket instead.
+    ///
+    /// This method may throw exception if socket creation fails.
+    ///
+    /// @param iface reference to interface structure.
+    /// @param addr an address the created socket should be bound to
+    /// @param port a port that created socket should be bound to
+    ///
+    /// @return socket descriptor
+    int openSocket4(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+    /// @brief Opens IPv6 socket.
+    ///
+    /// Please do not use this method directly. Use openSocket instead.
+    ///
+    /// This method may throw exception if socket creation fails.
+    ///
+    /// @param iface reference to interface structure.
+    /// @param addr an address the created socket should be bound to
+    /// @param port a port that created socket should be bound to
+    ///
+    /// @return socket descriptor
+    int openSocket6(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+    /// @brief Adds an interface to list of known interfaces.
+    ///
+    /// @param iface reference to Iface object.
+    void addInterface(const Iface& iface) {
+        ifaces_.push_back(iface);
+    }
+
     /// @brief Detects network interfaces.
     ///
     /// This method will eventually detect available interfaces. For now
@@ -159,24 +358,11 @@ protected:
     void
     detectIfaces();
 
-    ///
-    /// Opens UDP/IPv6 socket and binds it to address, interface and port.
-    ///
-    /// @param ifname name of the interface
-    /// @param addr address to be bound.
-    /// @param port UDP port.
-    ///
-    /// @return socket descriptor, if socket creation, binding and multicast
-    /// group join were all successful. -1 otherwise.
-    int openSocket(const std::string& ifname,
-                   const isc::asiolink::IOAddress& addr,
-                   int port);
-
     // TODO: having 2 maps (ifindex->iface and ifname->iface would)
     //      probably be better for performance reasons
 
     /// List of available interfaces
-    IfaceLst ifaces_;
+    IfaceCollection ifaces_;
 
     /// a pointer to a sole instance of this class (a singleton)
     static IfaceMgr * instance_;
@@ -184,8 +370,9 @@ protected:
     // TODO: Also keep this interface on Iface once interface detection
     // is implemented. We may need it e.g. to close all sockets on
     // specific interface
-    int recvsock_; // TODO: should be fd_set eventually, but we have only
-    int sendsock_; // 2 sockets for now. Will do for until next release
+    //int recvsock_; // TODO: should be fd_set eventually, but we have only
+    //int sendsock_; // 2 sockets for now. Will do for until next release
+
     // we can't use the same socket, as receiving socket
     // is bound to multicast address. And we all know what happens
     // to people who try to use multicast as source address.
@@ -197,9 +384,6 @@ protected:
     boost::scoped_array<char> control_buf_;
 
 private:
-    /// Opens sockets on detected interfaces.
-    bool
-    openSockets();
 
     /// creates a single instance of this class (a singleton implementation)
     static void
@@ -221,6 +405,7 @@ private:
     bool
     joinMcast(int sock, const std::string& ifname,
               const std::string& mcast);
+
 };
 
 }; // namespace isc::dhcp
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 985368e..f37194c 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -25,8 +25,6 @@ check-local:
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
 AM_CPPFLAGS += -I$(top_srcdir)/src/bin
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
 AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
@@ -57,8 +55,8 @@ dhcp6_unittests_LDADD = $(GTEST_LDADD)
 dhcp6_unittests_LDADD += $(SQLITE_LIBS)
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 endif
 
 noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index 72e48e4..50f37af 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -34,7 +34,7 @@ namespace test {
 class NakedDhcpv6Srv: public Dhcpv6Srv {
     // "naked" Interface Manager, exposes internal fields
 public:
-    NakedDhcpv6Srv() { }
+    NakedDhcpv6Srv():Dhcpv6Srv(DHCP6_SERVER_PORT + 10000) { }
 
     boost::shared_ptr<Pkt6>
     processSolicit(boost::shared_ptr<Pkt6>& request) {
@@ -53,30 +53,27 @@ public:
 };
 
 TEST_F(Dhcpv6SrvTest, basic) {
-    // there's almost no code now. What's there provides echo capability
-    // that is just a proof of concept and will be removed soon
-    // No need to thoroughly test it
-
     // srv has stubbed interface detection. It will read
     // interfaces.txt instead. It will pretend to have detected
     // fe80::1234 link-local address on eth0 interface. Obviously
     // an attempt to bind this socket will fail.
-    EXPECT_NO_THROW( {
-        Dhcpv6Srv * srv = new Dhcpv6Srv();
-
-        delete srv;
-        });
+    Dhcpv6Srv* srv = 0;
+    ASSERT_NO_THROW( {
+        // open an unprivileged port
+        srv = new Dhcpv6Srv(DHCP6_SERVER_PORT + 10000);
+    });
 
+    delete srv;
 }
 
 TEST_F(Dhcpv6SrvTest, Solicit_basic) {
     NakedDhcpv6Srv * srv = 0;
-    EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+    ASSERT_NO_THROW( srv = new NakedDhcpv6Srv(); );
 
     // a dummy content for client-id
     boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
-    for (int i=0; i<32; i++)
-        clntDuid[i] = 100+i;
+    for (int i = 0; i < 32; i++)
+        clntDuid[i] = 100 + i;
 
     boost::shared_ptr<Pkt6> sol =
         boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
index f126e6a..3cc2ae8 100644
--- a/src/bin/dhcp6/tests/iface_mgr_unittest.cc
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -20,9 +20,10 @@
 #include <arpa/inet.h>
 #include <gtest/gtest.h>
 
-#include "io_address.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp/dhcp4.h>
 
 using namespace std;
 using namespace isc;
@@ -39,16 +40,7 @@ class NakedIfaceMgr: public IfaceMgr {
     // "naked" Interface Manager, exposes internal fields
 public:
     NakedIfaceMgr() { }
-    IfaceLst & getIfacesLst() { return ifaces_; }
-    void setSendSock(int sock) { sendsock_ = sock; }
-    void setRecvSock(int sock) { recvsock_ = sock; }
-
-    int openSocket(const std::string& ifname,
-                   const isc::asiolink::IOAddress& addr,
-                   int port) {
-        return IfaceMgr::openSocket(ifname, addr, port);
-    }
-
+    IfaceCollection & getIfacesLst() { return ifaces_; }
 };
 
 // dummy class for now, but this will be expanded when needed
@@ -56,6 +48,13 @@ class IfaceMgrTest : public ::testing::Test {
 public:
     IfaceMgrTest() {
     }
+
+    void createLoInterfacesTxt() {
+        unlink(INTERFACE_FILE);
+        fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+        fakeifaces << LOOPBACK << " ::1";
+        fakeifaces.close();
+    }
 };
 
 // We need some known interface to work reliably. Loopback interface
@@ -109,6 +108,7 @@ TEST_F(IfaceMgrTest, dhcp6Sniffer) {
     while (true) {
         pkt = ifacemgr->receive();
 
+        cout << "// this code is autogenerated. Do NOT edit." << endl;
         cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
         cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
         cout << "    Pkt6* pkt;" << endl;
@@ -183,10 +183,10 @@ TEST_F(IfaceMgrTest, getIface) {
 
     cout << "There are " << ifacemgr->getIfacesLst().size()
          << " interfaces." << endl;
-    for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+    for (IfaceMgr::IfaceCollection::iterator iface=ifacemgr->getIfacesLst().begin();
          iface != ifacemgr->getIfacesLst().end();
          ++iface) {
-        cout << "  " << iface->name_ << "/" << iface->ifindex_ << endl;
+        cout << "  " << iface->getFullName() << endl;
     }
 
 
@@ -195,15 +195,15 @@ TEST_F(IfaceMgrTest, getIface) {
     // ASSERT_NE(NULL, tmp); is not supported. hmmmm.
     ASSERT_TRUE( tmp != NULL );
 
-    EXPECT_STREQ( "en3", tmp->name_.c_str() );
-    EXPECT_EQ(5, tmp->ifindex_);
+    EXPECT_EQ( "en3", tmp->getName() );
+    EXPECT_EQ(5, tmp->getIndex());
 
     // check that interface can be retrieved by name
     tmp = ifacemgr->getIface("lo1");
     ASSERT_TRUE( tmp != NULL );
 
-    EXPECT_STREQ( "lo1", tmp->name_.c_str() );
-    EXPECT_EQ(1, tmp->ifindex_);
+    EXPECT_EQ( "lo1", tmp->getName() );
+    EXPECT_EQ(1, tmp->getIndex());
 
     // check that non-existing interfaces are not returned
     EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
@@ -231,58 +231,51 @@ TEST_F(IfaceMgrTest, detectIfaces) {
     IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
 
     // there should be one address
-    EXPECT_EQ(1, eth0->addrs_.size());
+    IfaceMgr::AddressCollection addrs = eth0->getAddresses();
+    ASSERT_EQ(1, addrs.size());
 
-    IOAddress * addr = &(*eth0->addrs_.begin());
-    ASSERT_TRUE( addr != NULL );
+    IOAddress addr = *addrs.begin();
 
-    EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+    EXPECT_STREQ( "fe80::1234", addr.toText().c_str() );
 
     delete ifacemgr;
 }
 
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sockets) {
+TEST_F(IfaceMgrTest, sockets6) {
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
 
+    createLoInterfacesTxt();
+
     NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
 
     IOAddress loAddr("::1");
 
+    Pkt6 pkt6(128);
+    pkt6.iface_ = LOOPBACK;
+
     // bind multicast socket to port 10547
     int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
     EXPECT_GT(socket1, 0); // socket > 0
 
+    EXPECT_EQ(socket1, ifacemgr->getSocket(pkt6));
+
     // bind unicast socket to port 10548
     int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
     EXPECT_GT(socket2, 0);
 
-    // expect success. This address/port is already bound, but
-    // we are using SO_REUSEADDR, so we can bind it twice
-    int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-
-    // rebinding succeeds on Linux, fails on BSD
-    // TODO: add OS-specific defines here (or modify code to
-    // behave the same way on all OSes, but that may not be
-    // possible
-    // EXPECT_GT(socket3, 0); // socket > 0
-
-    // we now have 3 sockets open at the same time. Looks good.
+    // removed code for binding socket twice to the same address/port
+    // as it caused problems on some platforms (e.g. Mac OS X)
 
     close(socket1);
     close(socket2);
-    close(socket3);
 
     delete ifacemgr;
 }
 
 // TODO: disabled due to other naming on various systems
 // (lo in Linux, lo0 in BSD systems)
-TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+TEST_F(IfaceMgrTest, DISABLED_sockets6Mcast) {
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
 
@@ -311,27 +304,21 @@ TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
     delete ifacemgr;
 }
 
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+TEST_F(IfaceMgrTest, sendReceive6) {
+
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
+    createLoInterfacesTxt();
 
-    fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
-    fakeifaces << LOOPBACK << " ::1";
-    fakeifaces.close();
-
-    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+    NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
 
     // let's assume that every supported OS have lo interface
     IOAddress loAddr("::1");
-    int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-    int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
-
-    ifacemgr->setSendSock(socket2);
-    ifacemgr->setRecvSock(socket1);
+    int socket1 = 0, socket2 = 0;
+    EXPECT_NO_THROW(
+        socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+        socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+    );
 
     boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
 
@@ -349,7 +336,7 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
 
     EXPECT_EQ(true, ifacemgr->send(sendPkt));
 
-    rcvPkt = ifacemgr->receive();
+    rcvPkt = ifacemgr->receive6();
 
     ASSERT_TRUE( rcvPkt ); // received our own packet
 
@@ -359,7 +346,168 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
                         rcvPkt->data_len_) );
 
     EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
-    EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+    // since we opened 2 sockets on the same interface and none of them is multicast,
+    // none is preferred over the other for sending data, so we really should not
+    // assume that one or the other will always be chosen for sending data. Therefore
+    // we should accept both values as source ports.
+    EXPECT_TRUE( (rcvPkt->remote_port_ == 10546) || (rcvPkt->remote_port_ == 10547) );
+
+    delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, socket4) {
+
+    createLoInterfacesTxt();
+    NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+    // Let's assume that every supported OS have lo interface.
+    IOAddress loAddr("127.0.0.1");
+    // Use unprivileged port (it's convenient for running tests as non-root).
+    int socket1 = 0;
+
+    EXPECT_NO_THROW(
+        socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+    );
+
+    EXPECT_GT(socket1, 0);
+
+    Pkt4 pkt(DHCPDISCOVER, 1234);
+    pkt.setIface(LOOPBACK);
+
+    // Expect that we get the socket that we just opened.
+    EXPECT_EQ(socket1, ifacemgr->getSocket(pkt));
+
+    close(socket1);
+
+    delete ifacemgr;
+}
+
+// Test the Iface structure itself
+TEST_F(IfaceMgrTest, iface) {
+    IfaceMgr::Iface* iface = 0;
+    EXPECT_NO_THROW(
+        iface = new IfaceMgr::Iface("eth0",1);
+    );
+
+    EXPECT_EQ("eth0", iface->getName());
+    EXPECT_EQ(1, iface->getIndex());
+    EXPECT_EQ("eth0/1", iface->getFullName());
+
+    // Let's make a copy of this address collection.
+    IfaceMgr::AddressCollection addrs = iface->getAddresses();
+
+    EXPECT_EQ(0, addrs.size());
+
+    IOAddress addr1("192.0.2.6");
+    iface->addAddress(addr1);
+
+    addrs = iface->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+    EXPECT_EQ("192.0.2.6", addrs.at(0).toText());
+
+    // No such address, should return false.
+    EXPECT_FALSE(iface->delAddress(IOAddress("192.0.8.9")));
+
+    // This address is present, delete it!
+    EXPECT_TRUE(iface->delAddress(IOAddress("192.0.2.6")));
+
+    // addrs above is a copy of the collection, so we need to retrieve it
+    // again to observe the change. Test code may also serve as example
+    // usage code.
+    addrs = iface->getAddresses();
+
+    EXPECT_EQ(0, addrs.size());
+
+    EXPECT_NO_THROW(
+        delete iface;
+    );
+}
+
+TEST_F(IfaceMgrTest, socketInfo) {
+
+    // check that socketinfo for IPv4 socket is functional
+    IfaceMgr::SocketInfo sock1(7, IOAddress("192.0.2.56"), DHCP4_SERVER_PORT + 7);
+    EXPECT_EQ(7, sock1.sockfd_);
+    EXPECT_EQ("192.0.2.56", sock1.addr_.toText());
+    EXPECT_EQ(AF_INET, sock1.family_);
+    EXPECT_EQ(DHCP4_SERVER_PORT + 7, sock1.port_);
+
+    // check that socketinfo for IPv6 socket is functional
+    IfaceMgr::SocketInfo sock2(9, IOAddress("2001:db8:1::56"), DHCP4_SERVER_PORT + 9);
+    EXPECT_EQ(9, sock2.sockfd_);
+    EXPECT_EQ("2001:db8:1::56", sock2.addr_.toText());
+    EXPECT_EQ(AF_INET6, sock2.family_);
+    EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_);
+
+    // now let's test if IfaceMgr handles socket info properly
+    createLoInterfacesTxt();
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+    IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK);
+    ASSERT_TRUE(loopback);
+    loopback->addSocket(sock1);
+    loopback->addSocket(sock2);
+
+    Pkt6 pkt6(100);
+
+    // pkt6 does not have interface set yet
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        BadValue
+    );
+
+    // try to send over non-existing interface
+    pkt6.iface_ = "nosuchinterface45";
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        BadValue
+    );
+
+    // this will work
+    pkt6.iface_ = LOOPBACK;
+    EXPECT_EQ(9, ifacemgr->getSocket(pkt6));
+
+    bool deleted = false;
+    EXPECT_NO_THROW(
+        deleted = ifacemgr->getIface(LOOPBACK)->delSocket(9);
+    );
+    EXPECT_EQ(true, deleted);
+
+    // it should throw again, there's no usable socket anymore
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        Unexpected
+    );
+
+    // repeat for pkt4
+    Pkt4 pkt4(DHCPDISCOVER, 1);
+
+    // pkt4 does not have interface set yet.
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        BadValue
+    );
+
+    // Try to send over non-existing interface.
+    pkt4.setIface("nosuchinterface45");
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        BadValue
+    );
+
+    // Socket info is set, packet has well defined interface. It should work.
+    pkt4.setIface(LOOPBACK);
+    EXPECT_EQ(7, ifacemgr->getSocket(pkt4));
+
+    EXPECT_NO_THROW(
+        ifacemgr->getIface(LOOPBACK)->delSocket(7);
+    );
+
+    // It should throw again, there's no usable socket anymore.
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        Unexpected
+    );
 
     delete ifacemgr;
 }
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 12ddab3..4d407bb 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -45,9 +45,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index cffafe1..cba98ae 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -27,5 +27,6 @@ endif
 	PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
 	TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
 	TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/tests/testdata/example.com.sqlite3 b/src/bin/xfrin/tests/testdata/example.com.sqlite3
index ed241c3..3538e3d 100644
Binary files a/src/bin/xfrin/tests/testdata/example.com.sqlite3 and b/src/bin/xfrin/tests/testdata/example.com.sqlite3 differ
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 1e4d942..eb2c747 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -14,15 +14,24 @@
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
 import unittest
+import re
 import shutil
 import socket
 import sys
 import io
 from isc.testutils.tsigctx_mock import MockTSIGContext
+from isc.testutils.rrset_utils import *
 from xfrin import *
 import xfrin
 from isc.xfrin.diff import Diff
 import isc.log
+# If we use any python library that is basically a wrapper for
+# a library we use as well (like sqlite3 in our datasources),
+# we must make sure we import ours first; if we have special
+# rpath or libtool rules to pick the correct version, python might
+# choose the wrong one first, if those rules aren't hit first.
+# This would result in missing symbols later.
+import sqlite3
 
 #
 # Commonly used (mostly constant) test parameters
@@ -34,11 +43,9 @@ TEST_RRCLASS_STR = 'IN'
 TEST_DB_FILE = 'db_file'
 TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
 TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV4_ADDRESS, 53))
 TEST_MASTER_IPV6_ADDRESS = '::1'
 TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV6_ADDRESS, 53))
 
 TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
@@ -151,7 +158,7 @@ class MockDataSourceClient():
             return (DataSourceClient.PARTIALMATCH, self)
         raise ValueError('Unexpected input to mock client: bug in test case?')
 
-    def find(self, name, rrtype, target, options):
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
         '''Mock ZoneFinder.find().
 
         It returns the predefined SOA RRset to queries for SOA of the common
@@ -170,7 +177,8 @@ class MockDataSourceClient():
             return (ZoneFinder.SUCCESS, dup_soa_rrset)
         raise ValueError('Unexpected input to mock finder: bug in test case?')
 
-    def get_updater(self, zone_name, replace):
+    def get_updater(self, zone_name, replace, journaling=False):
+        self._journaling_enabled = journaling
         return self
 
     def add_rrset(self, rrset):
@@ -221,7 +229,7 @@ class MockXfrinConnection(XfrinConnection):
     def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
                  shutdown_event, master_addr, tsig_key=None):
         super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
-                         shutdown_event, master_addr)
+                         shutdown_event, master_addr, TEST_DB_FILE)
         self.query_data = b''
         self.reply_data = b''
         self.force_time_out = False
@@ -271,10 +279,11 @@ class MockXfrinConnection(XfrinConnection):
                 self.response_generator()
         return len(data)
 
-    def create_response_data(self, response=True, bad_qid=False,
+    def create_response_data(self, response=True, auth=True, bad_qid=False,
                              rcode=Rcode.NOERROR(),
                              questions=default_questions,
                              answers=default_answers,
+                             authorities=[],
                              tsig_ctx=None):
         resp = Message(Message.RENDER)
         qid = self.qid
@@ -285,8 +294,11 @@ class MockXfrinConnection(XfrinConnection):
         resp.set_rcode(rcode)
         if response:
             resp.set_header_flag(Message.HEADERFLAG_QR)
+        if auth:
+            resp.set_header_flag(Message.HEADERFLAG_AA)
         [resp.add_question(q) for q in questions]
         [resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
+        [resp.add_rrset(Message.SECTION_AUTHORITY, a) for a in authorities]
 
         renderer = MessageRenderer()
         if tsig_ctx is not None:
@@ -339,13 +351,44 @@ class TestXfrinInitialSOA(TestXfrinState):
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinFirstData()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.conn._end_serial)
+        self.assertEqual(1234, self.conn._end_serial.get_value())
 
     def test_handle_not_soa(self):
         # The given RR is not of SOA
         self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
                           self.ns_rrset)
 
+    def test_handle_ixfr_uptodate(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate2(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate3(self):
+        # Similar to the previous case, but checking serial number arithmetic
+        # comparison
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(0xffffffff)
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_axfr_uptodate(self):
+        # "request serial" should matter only for IXFR
+        self.conn._request_type = RRType.AXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
     def test_finish_message(self):
         self.assertTrue(self.state.finish_message(self.conn))
 
@@ -354,7 +397,8 @@ class TestXfrinFirstData(TestXfrinState):
         super().setUp()
         self.state = XfrinFirstData()
         self.conn._request_type = RRType.IXFR()
-        self.conn._request_serial = 1230 # arbitrary chosen serial < 1234
+        # arbitrary chosen serial < 1234:
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._diff = None           # should be replaced in the AXFR case
 
     def test_handle_ixfr_begin_soa(self):
@@ -434,7 +478,7 @@ class TestXfrinIXFRDelete(TestXfrinState):
         # false.
         self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual([], self.conn._diff.get_buffer())
-        self.assertEqual(1234, self.conn._current_serial)
+        self.assertEqual(1234, self.conn._current_serial.get_value())
         self.assertEqual(type(XfrinIXFRAddSOA()),
                          type(self.conn.get_xfrstate()))
 
@@ -465,7 +509,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         # We need record the state in 'conn' to check the case where the
         # state doesn't change.
         XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
-        self.conn._current_serial = 1230
+        self.conn._current_serial = isc.dns.Serial(1230)
         self.state = self.conn.get_xfrstate()
 
     def test_handle_add_rr(self):
@@ -477,7 +521,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
 
     def test_handle_end_soa(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -486,7 +530,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual([], self.conn._diff.get_buffer())
 
     def test_handle_new_delete(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         # SOA RR whose serial is the current one means we are going to a new
         # difference, starting with removing that SOA.
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
@@ -497,7 +541,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
 
     def test_handle_out_of_sync(self):
         # getting SOA with an inconsistent serial.  This is an error.
-        self.conn._end_serial = 1235
+        self.conn._end_serial = isc.dns.Serial(1235)
         self.assertRaises(XfrinProtocolError, self.state.handle_rr,
                           self.conn, soa_rrset)
 
@@ -516,11 +560,24 @@ class TestXfrinIXFREnd(TestXfrinState):
     def test_finish_message(self):
         self.assertFalse(self.state.finish_message(self.conn))
 
+class TestXfrinIXFRUptodate(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRUptodate()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertRaises(XfrinZoneUptodate, self.state.finish_message,
+                          self.conn)
+
 class TestXfrinAXFR(TestXfrinState):
     def setUp(self):
         super().setUp()
         self.state = XfrinAXFR()
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
 
     def test_handle_rr(self):
         """
@@ -595,7 +652,10 @@ class TestXfrinConnection(unittest.TestCase):
             'questions': [example_soa_question],
             'bad_qid': False,
             'response': True,
+            'auth': True,
             'rcode': Rcode.NOERROR(),
+            'answers': default_answers,
+            'authorities': [],
             'tsig': False,
             'axfr_after_soa': self._create_normal_response_data
             }
@@ -652,8 +712,11 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.reply_data = self.conn.create_response_data(
             bad_qid=self.soa_response_params['bad_qid'],
             response=self.soa_response_params['response'],
+            auth=self.soa_response_params['auth'],
             rcode=self.soa_response_params['rcode'],
             questions=self.soa_response_params['questions'],
+            answers=self.soa_response_params['answers'],
+            authorities=self.soa_response_params['authorities'],
             tsig_ctx=verify_ctx)
         if self.soa_response_params['axfr_after_soa'] != None:
             self.conn.response_generator = \
@@ -684,6 +747,15 @@ class TestXfrinConnection(unittest.TestCase):
         rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
         return rrset
 
+    def _set_test_zone(self, zone_name):
+        '''Set the zone name for transfer to the specified one.
+
+        It also makes sure that the SOA RR (if it exists) is correctly (re)set.
+
+        '''
+        self.conn._zone_name = zone_name
+        self.conn._zone_soa = self.conn._get_zone_soa()
+
 class TestAXFR(TestXfrinConnection):
     def setUp(self):
         super().setUp()
@@ -778,25 +850,26 @@ class TestAXFR(TestXfrinConnection):
         # IXFR query
         msg = self.conn._create_query(RRType.IXFR())
         check_query(RRType.IXFR(), begin_soa_rrset)
-        self.assertEqual(1230, self.conn._request_serial)
+        self.assertEqual(1230, self.conn._request_serial.get_value())
 
     def test_create_ixfr_query_fail(self):
         # In these cases _create_query() will fail to find a valid SOA RR to
         # insert in the IXFR query, and should raise an exception.
 
-        self.conn._zone_name = Name('no-such-zone.example')
+        self._set_test_zone(Name('no-such-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('partial-match-zone.example')
+        self._set_test_zone(Name('partial-match-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('no-soa.example')
+        self._set_test_zone(Name('no-soa.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('dup-soa.example')
+        self._set_test_zone(Name('dup-soa.example'))
+        self.conn._zone_soa = self.conn._get_zone_soa()
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
@@ -827,8 +900,10 @@ class TestAXFR(TestXfrinConnection):
         self.conn._tsig_key = TSIG_KEY
         # server tsig check fail, return with RCODE 9 (NOTAUTH)
         self.conn._send_query(RRType.SOA())
-        self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.conn.reply_data = \
+            self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_without_end_soa(self):
         self.conn._send_query(RRType.AXFR())
@@ -841,7 +916,8 @@ class TestAXFR(TestXfrinConnection):
     def test_response_bad_qid(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_error_code_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -852,7 +928,7 @@ class TestAXFR(TestXfrinConnection):
                 rcode=Rcode.SERVFAIL())
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._handle_xfrin_responses)
 
@@ -864,7 +940,7 @@ class TestAXFR(TestXfrinConnection):
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADKEY",
                                self.conn._handle_xfrin_responses)
 
@@ -877,18 +953,21 @@ class TestAXFR(TestXfrinConnection):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             rcode=Rcode.SERVFAIL())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_multi_question(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             questions=[example_axfr_question, example_axfr_question])
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_non_response(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(response = False)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_soacheck(self):
         # we need to defer the creation until we know the QID, which is
@@ -903,7 +982,7 @@ class TestAXFR(TestXfrinConnection):
     def test_soacheck_badqid(self):
         self.soa_response_params['bad_qid'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_bad_qid_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -913,19 +992,123 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._check_soa_serial)
 
     def test_soacheck_non_response(self):
         self.soa_response_params['response'] = False
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_error_code(self):
         self.soa_response_params['rcode'] = Rcode.SERVFAIL()
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_notauth(self):
+        self.soa_response_params['auth'] = False
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate(self):
+        # Primary's SOA serial is identical to the local serial
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate2(self):
+        # Primary's SOA serial is "smaller" than the local serial
+        self.soa_response_params['answers'] = [create_soa(1229)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate3(self):
+        # Similar to the previous case, but checking that the comparison is
+        # based on serial number arithmetic.
+        self.soa_response_params['answers'] = [create_soa(0xffffffff)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_newzone(self):
+        # Primary's SOA is 'old', but this secondary doesn't know anything
+        # about the zone yet, so it should accept it.
+        def response_generator():
+            # _request_serial is set in _check_soa_serial().  Reset it here.
+            self.conn._request_serial = None
+            self._create_soa_response_data()
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = response_generator
+        self.assertEqual(XFRIN_OK, self.conn._check_soa_serial())
+
+    def test_soacheck_question_empty(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(Name('example.org'),
+                                                          TEST_RRCLASS,
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          RRClass.CH(),
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_type_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          TEST_RRCLASS,
+                                                          RRType.AAAA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_no_soa(self):
+        # The response simply doesn't contain an SOA, without any other
+        # indication of errors.
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [create_soa(1234,
+                                                          Name('example.org'))]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+        self.soa_response_params['answers'] = [soa]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_multiple_soa(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [soa_rrset, soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_cname_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        # Add an SOA to the answer, too, to make sure it doesn't deceive the parser
+        self.soa_response_params['answers'] = [soa_rrset, create_cname()]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_referral_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_nodata_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig(self):
         # Use a mock tsig context emulating a validly signed response
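The up-to-date checks added above (test_soacheck_uptodate through test_soacheck_uptodate3) exercise the serial comparison that now goes through isc.dns.Serial.  The following is a minimal, standalone sketch of the RFC 1982 semantics those tests rely on; it uses plain integers and a hypothetical serial_gt() helper for illustration only, not the actual Serial class:

SERIAL_BITS = 32
SERIAL_HALF = 2 ** (SERIAL_BITS - 1)

def serial_gt(s1, s2):
    # RFC 1982: s1 is 'greater than' s2 iff one of these holds
    return (s1 < s2 and (s2 - s1) > SERIAL_HALF) or \
           (s1 > s2 and (s1 - s2) < SERIAL_HALF)

# The cases covered by the tests above:
assert not serial_gt(1230, 1230)          # identical serial: not newer
assert not serial_gt(1229, 1230)          # plainly smaller
assert not serial_gt(0xffffffff, 1230)    # "smaller" by serial arithmetic
assert serial_gt(1231, 1230)              # genuinely newer
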
@@ -944,7 +1127,7 @@ class TestAXFR(TestXfrinConnection):
         self.soa_response_params['rcode'] = Rcode.NOTAUTH()
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_noerror_badsig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -957,7 +1140,7 @@ class TestAXFR(TestXfrinConnection):
         # treat this as a final failure (just as BIND 9 does).
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_unsigned_response(self):
         # we can use a real TSIGContext for this.  the response doesn't
@@ -966,14 +1149,14 @@ class TestAXFR(TestXfrinConnection):
         # it as a fatal transaction failure, too.
         self.conn._tsig_key = TSIG_KEY
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_unexpected_tsig_response(self):
         # we reject unexpected TSIG in responses (following BIND 9's
         # behavior)
         self.soa_response_params['tsig'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_response_shutdown(self):
         self.conn.response_generator = self._create_normal_response_data
@@ -1132,6 +1315,7 @@ class TestAXFR(TestXfrinConnection):
     def test_do_xfrin(self):
         self.conn.response_generator = self._create_normal_response_data
         self.assertEqual(self.conn.do_xfrin(False), XFRIN_OK)
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
 
     def test_do_xfrin_with_tsig(self):
         # use TSIG with a mock context.  we fake all verify results to
@@ -1234,6 +1418,18 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
 
+    def test_do_soacheck_uptodate(self):
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+
+    def test_do_soacheck_protocol_error(self):
+        # There are several cases, but at this level it's sufficient to check
+        # only one.  We use the case where there's no SOA in the response.
+        self.soa_response_params['answers'] = []
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
+
     def test_do_soacheck_and_xfrin_with_tsig(self):
         # We are going to have a SOA query/response transaction, followed by
         # AXFR, all TSIG signed.  xfrin should use a new TSIG context for
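The two new do_soacheck tests above pin down how do_xfrin(check_soa=True) reports its outcome after this change: an up-to-date zone counts as success, a bogus SOA response as failure.  A small, hypothetical helper (not part of the patch) that maps the module's result codes to readable strings, assuming the XFRIN_OK/XFRIN_FAIL values defined in xfrin.py.in below:

def describe_xfrin_result(ret, xfrin_ok=0, xfrin_fail=1):
    '''Map an xfr session result code to a short description.'''
    if ret == xfrin_ok:
        return 'transferred, or zone was already up to date'
    if ret == xfrin_fail:
        return 'failed (protocol or internal error; see the xfrin log)'
    return 'unknown result code: %s' % ret
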
@@ -1266,9 +1462,8 @@ class TestIXFRResponse(TestXfrinConnection):
     def setUp(self):
         super().setUp()
         self.conn._query_id = self.conn.qid = 1035
-        self.conn._request_serial = 1230
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._request_type = RRType.IXFR()
-        self._zone_name = TEST_ZONE_NAME
         self.conn._datasrc_client = MockDataSourceClient()
         XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
 
@@ -1283,6 +1478,7 @@ class TestIXFRResponse(TestXfrinConnection):
             answers=[soa_rrset, begin_soa_rrset, soa_rrset, soa_rrset])
         self.conn._handle_xfrin_responses()
         self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
+        self.assertTrue(self.conn._datasrc_client._journaling_enabled)
         self.assertEqual([], self.conn._datasrc_client.diffs)
         check_diffs(self.assertEqual,
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
@@ -1342,6 +1538,16 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate(self):
+        '''IXFR response indicates the local zone is new enough (up to date)'''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset])
+        self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_response_broken(self):
         '''Test with a broken response.
 
@@ -1374,6 +1580,22 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate_extra(self):
+        '''Similar to the 'uptodate' test, but with extra bogus data.
+
+        In both cases an exception is raised, but in this case it's
+        considered an error.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset, soa_rrset])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_to_axfr_response(self):
         '''AXFR-style IXFR response.
 
@@ -1387,6 +1609,8 @@ class TestIXFRResponse(TestXfrinConnection):
             answers=[soa_rrset, ns_rr, a_rr, soa_rrset])
         self.conn._handle_xfrin_responses()
         self.assertEqual(type(XfrinAXFREnd()), type(self.conn.get_xfrstate()))
+        # In the case of AXFR-style IXFR, journaling must have been disabled.
+        self.assertFalse(self.conn._datasrc_client._journaling_enabled)
         self.assertEqual([], self.conn._datasrc_client.diffs)
         # The SOA should be added exactly once, and in our implementation
         # it should be added at the end of the sequence.
@@ -1475,13 +1699,25 @@ class TestIXFRSession(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
-    def test_do_xfrin_fail(self):
+    def test_do_xfrin_fail2(self):
         '''IXFR fails due to a bogus DNS message.
 
         '''
         self._create_broken_response_data()
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
+    def test_do_xfrin_uptodate(self):
+        '''IXFR is (gracefully) aborted because the serial is not new
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[begin_soa_rrset])
+        self.conn.response_generator = create_response
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
 class TestXFRSessionWithSQLite3(TestXfrinConnection):
     '''Tests for XFR sessions using an SQLite3 DB.
 
@@ -1515,8 +1751,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def get_zone_serial(self):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
-                                  None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA())
         self.assertEqual(ZoneFinder.SUCCESS, result)
         self.assertEqual(1, soa.get_rdata_count())
         return get_soa_serial(soa.get_rdata()[0])
@@ -1524,7 +1759,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def record_exist(self, name, type):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(name, type)
         return result == ZoneFinder.SUCCESS
 
     def test_do_ixfrin_sqlite3(self):
@@ -1536,9 +1771,22 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
 
         # Confirm xfrin succeeds and SOA is updated
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
+
+        # Also confirm the corresponding diffs are stored in the diffs table
+        conn = sqlite3.connect(self.sqlite3db_obj)
+        cur = conn.cursor()
+        cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+        soa_rdata_base = 'master.example.com. admin.example.com. ' + \
+            'SERIAL 3600 1800 2419200 7200'
+        self.assertEqual(cur.fetchall(),
+                         [(TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1230), soa_rdata_base)),
+                          (TEST_ZONE_NAME_STR, 'SOA', 3600,
+                           re.sub('SERIAL', str(1234), soa_rdata_base))])
+        conn.close()
 
     def test_do_ixfrin_sqlite3_fail(self):
         '''Similar to the previous test, but xfrin fails due to error.
@@ -1554,12 +1802,12 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                          self._create_soa('1235')])
         self.conn.response_generator = create_ixfr_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
 
     def test_do_ixfrin_nozone_sqlite3(self):
-        self.conn._zone_name = Name('nosuchzone.example')
+        self._set_test_zone(Name('nosuchzone.example'))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
         # This should fail even before starting state transition
         self.assertEqual(None, self.conn.get_xfrstate())
@@ -1575,11 +1823,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_response
 
         # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 
@@ -1607,11 +1855,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                 answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
         self.conn.response_generator = create_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
 
@@ -1645,11 +1893,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                                     RRType.AXFR())],
                 answers=[soa_rrset, self._create_ns(), soa_rrset])
         self.conn.response_generator = create_response
-        self.conn._zone_name = Name('example.com')
+        self._set_test_zone(Name('example.com'))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
         self.assertEqual(type(XfrinAXFREnd()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 911b3b3..1167bef 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -24,6 +24,7 @@ import struct
 import threading
 import socket
 import random
+from functools import reduce
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 from isc.notify import notify_out
@@ -75,9 +76,10 @@ DEFAULT_MASTER_PORT = 53
 DEFAULT_ZONE_CLASS = RRClass.IN()
 
 __version__ = 'BIND10'
-# define xfrin rcode
-XFRIN_OK = 0
-XFRIN_FAIL = 1
+
+# Internal result codes of an xfr session
+XFRIN_OK = 0                    # normal success
+XFRIN_FAIL = 1                  # general failure (internal/external)
 
 class XfrinException(Exception):
     pass
@@ -87,6 +89,11 @@ class XfrinProtocolError(Exception):
     '''
     pass
 
+class XfrinZoneUptodate(Exception):
+    '''Raised when the zone is already up to date and no transfer is needed.
+    '''
+    pass
+
 class XfrinZoneInfoException(Exception):
     """This exception is raised if there is an error in the given
        configuration (part), or when a command does not have a required
@@ -153,7 +160,7 @@ def format_addrinfo(addrinfo):
                         "appear to be consisting of (family, socktype, (addr, port))")
 
 def get_soa_serial(soa_rdata):
-    '''Extract the serial field of an SOA RDATA and returns it as an intger.
+    '''Extract the serial field of SOA RDATA and return it as a Serial object.
 
     We don't have to be very efficient here, so we first dump the entire RDATA
     as a string and convert the first corresponding field.  This should be
@@ -162,7 +169,7 @@ def get_soa_serial(soa_rdata):
     should be a more direct and convenient way to get access to the SOA
     fields.
     '''
-    return int(soa_rdata.to_text().split()[2])
+    return Serial(int(soa_rdata.to_text().split()[2]))
 
 class XfrinState:
     '''
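For reference, the get_soa_serial() change in the hunk above keeps the same text-based extraction but wraps the result in a Serial object.  A minimal sketch of the extraction itself, using a plain int so the snippet is self-contained; the sample RDATA text matches the test data used elsewhere in this patch:

def soa_serial_from_text(soa_rdata_text):
    # SOA RDATA text: MNAME RNAME SERIAL REFRESH RETRY EXPIRE MINIMUM
    return int(soa_rdata_text.split()[2])

rdata = 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200'
assert soa_serial_from_text(rdata) == 1234
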
@@ -181,12 +188,12 @@ class XfrinState:
                              (AXFR or
             (recv SOA)        AXFR-style IXFR)  (SOA, add)
     InitialSOA------->FirstData------------->AXFR--------->AXFREnd
-                          |                  |  ^         (post xfr
-                          |                  |  |        checks, then
-                          |                  +--+        commit)
-                          |            (non SOA, add)
-                          |
-                          |                     (non SOA, delete)
+         |                |                  |  ^         (post xfr
+         |(IXFR &&        |                  |  |        checks, then
+         | recv SOA       |                  +--+        commit)
+         | not new)       |            (non SOA, add)
+         V                |
+    IXFRUptodate          |                     (non SOA, delete)
                (pure IXFR,|                           +-------+
             keep handling)|             (Delete SOA)  V       |
                           + ->IXFRDeleteSOA------>IXFRDelete--+
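The updated diagram above adds the IXFRUptodate state.  As a rough, simplified model of the state dispatch it describes (the real code is the XfrinState hierarchy below; the Conn/State names and the plain-integer serial comparison here are illustrative only):

class State:
    def set_xfrstate(self, conn, new_state):
        conn.state = new_state
    def handle_rr(self, conn, rr):
        raise NotImplementedError

class InitialSOA(State):
    def handle_rr(self, conn, rr):
        # rr is a (type, serial) pair in this toy model
        if conn.request_type == 'IXFR' and rr[1] <= conn.request_serial:
            self.set_xfrstate(conn, IXFRUptodate())
        else:
            self.set_xfrstate(conn, FirstData())

class FirstData(State):
    def handle_rr(self, conn, rr):
        pass    # decide between AXFR-style and pure IXFR handling

class IXFRUptodate(State):
    def handle_rr(self, conn, rr):
        raise ValueError('extra data after single IXFR response')

class Conn:
    def __init__(self, request_type, request_serial):
        self.request_type = request_type
        self.request_serial = request_serial
        self.state = InitialSOA()

conn = Conn('IXFR', 1230)
conn.state.handle_rr(conn, ('SOA', 1230))   # not newer -> IXFRUptodate
assert isinstance(conn.state, IXFRUptodate)
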
@@ -300,13 +307,14 @@ class XfrinInitialSOA(XfrinState):
                                      + rr.get_type().to_text() + ' received)')
         conn._end_serial = get_soa_serial(rr.get_rdata()[0])
 
-        # FIXME: we need to check the serial is actually greater than ours.
-        # To do so, however, we need to implement serial number arithmetic.
-        # Although it wouldn't be a big task, we'll leave it for a separate
-        # task for now.  (Always performing xfr could be inefficient, but
-        # shouldn't do any harm otherwise)
+        if conn._request_type == RRType.IXFR() and \
+                conn._end_serial <= conn._request_serial:
+            logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
+                        conn._request_serial, conn._end_serial)
+            self.set_xfrstate(conn, XfrinIXFRUptodate())
+        else:
+            self.set_xfrstate(conn, XfrinFirstData())
 
-        self.set_xfrstate(conn, XfrinFirstData())
         return True
 
 class XfrinFirstData(XfrinState):
@@ -367,7 +375,10 @@ class XfrinIXFRDeleteSOA(XfrinState):
                                  ' RR is given in IXFRDeleteSOA state')
         # This is the beginning state of one difference sequence (changes
         # for one SOA update).  We need to create a new Diff object now.
-        conn._diff = Diff(conn._datasrc_client, conn._zone_name)
+        # Note also that we (unconditionally) enable journaling here.  The
+        # Diff constructor may internally disable it, however, if the
+        # underlying data source doesn't support journaling.
+        conn._diff = Diff(conn._datasrc_client, conn._zone_name, False, True)
         conn._diff.delete_data(rr)
         self.set_xfrstate(conn, XfrinIXFRDelete())
         return True
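The hunk above now requests journaling when the Diff object is created (the Diff constructor may internally disable it if the data source has no journaling support).  A rough usage sketch of one IXFR difference sequence under that convention; the client, zone name and RRsets are placeholders, and the add_data()/commit() calls are assumed from xfrin's other uses of isc.xfrin.diff.Diff:

from isc.xfrin.diff import Diff

def apply_one_ixfr_sequence(datasrc_client, zone_name, old_soa, new_soa,
                            deletions, additions):
    # 3rd argument: not replacing the whole zone; 4th: request journaling
    diff = Diff(datasrc_client, zone_name, False, True)
    diff.delete_data(old_soa)
    for rr in deletions:
        diff.delete_data(rr)
    diff.add_data(new_soa)
    for rr in additions:
        diff.add_data(rr)
    diff.commit()
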
@@ -427,6 +438,14 @@ class XfrinIXFREnd(XfrinState):
         '''
         return False
 
+class XfrinIXFRUptodate(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after single IXFR response ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        raise XfrinZoneUptodate
+
 class XfrinAXFR(XfrinState):
     def handle_rr(self, conn, rr):
         """
@@ -470,10 +489,13 @@ class XfrinConnection(asyncore.dispatcher):
 
     def __init__(self,
                  sock_map, zone_name, rrclass, datasrc_client,
-                 shutdown_event, master_addrinfo, tsig_key=None,
+                 shutdown_event, master_addrinfo, db_file, tsig_key=None,
                  idle_timeout=60):
         '''Constructor of the XfrinConnection class.
 
+        db_file: SQLite3 DB file.  Unfortunately we still need this for a
+                 temporary workaround in _get_zone_soa().  This should be
+                 removed when we eliminate the need for the workaround.
         idle_timeout: max idle time for read data from socket.
         datasrc_client: the data source client object used for the XFR session.
                         This will eventually replace db_file completely.
@@ -497,7 +519,9 @@ class XfrinConnection(asyncore.dispatcher):
         self._rrclass = rrclass
 
         # Data source handler
+        self._db_file = db_file
         self._datasrc_client = datasrc_client
+        self._zone_soa = self._get_zone_soa()
 
         self._sock_map = sock_map
         self._soa_rr_count = 0
@@ -521,6 +545,55 @@ class XfrinConnection(asyncore.dispatcher):
         self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
         self.setblocking(1)
 
+    def _get_zone_soa(self):
+        '''Retrieve the current SOA RR of the zone to be transferred.
+
+        It will be used for various purposes in subsequent xfr protocol
+        processing.  It is legitimately possible that the zone is currently
+        empty and therefore doesn't have an SOA, so this method doesn't
+        consider it an error and returns None in such a case.  It may or
+        may not result in failure in the actual processing depending on
+        how the SOA is used.
+
+        When the zone has an SOA RR, this method makes sure that it's
+        valid, i.e., that it has exactly one RDATA; if that is not the case
+        this method returns None.
+
+        If the underlying data source doesn't even know the zone, this method
+        tries to provide backward compatible behavior where xfrin is
+        responsible for creating the zone in the corresponding DB table.
+        In the longer term we should deprecate this behavior by introducing
+        a more generic zone management framework, but at the moment we try
+        not to surprise existing users.  (Note also that the part that
+        provides the compatible behavior uses the old data source API.
+        We'll deprecate this API in the near future, too.)
+
+        '''
+        # get the zone finder.  this must be SUCCESS (not even
+        # PARTIALMATCH) because we are specifying the zone origin name.
+        result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            # The data source doesn't know the zone.  For now, we provide
+            # backward compatibility and create a new one ourselves.
+            isc.datasrc.sqlite3_ds.load(self._db_file,
+                                        self._zone_name.to_text(),
+                                        lambda : [])
+            logger.warn(XFRIN_ZONE_CREATED, self.zone_str())
+            # try again
+            result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return None
+        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+                                        None, ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
+            return None
+        if soa_rrset.get_rdata_count() != 1:
+            logger.warn(XFRIN_ZONE_MULTIPLE_SOA, self.zone_str(),
+                        soa_rrset.get_rdata_count())
+            return None
+        return soa_rrset
+
     def __set_xfrstate(self, new_state):
         self.__state = new_state
 
@@ -542,37 +615,16 @@ class XfrinConnection(asyncore.dispatcher):
                          str(e))
             return False
 
-    def _get_zone_soa(self):
-        result, finder = self._datasrc_client.find_zone(self._zone_name)
-        if result != DataSourceClient.SUCCESS:
-            raise XfrinException('Zone not found in the given data ' +
-                                 'source: ' + self.zone_str())
-        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
-                                        None, ZoneFinder.FIND_DEFAULT)
-        if result != ZoneFinder.SUCCESS:
-            raise XfrinException('SOA RR not found in zone: ' +
-                                 self.zone_str())
-        # Especially for database-based zones, a working zone may be in
-        # a broken state where it has more than one SOA RR.  We proactively
-        # check the condition and abort the xfr attempt if we identify it.
-        if soa_rrset.get_rdata_count() != 1:
-            raise XfrinException('Invalid number of SOA RRs for ' +
-                                 self.zone_str() + ': ' +
-                                 str(soa_rrset.get_rdata_count()))
-        return soa_rrset
-
     def _create_query(self, query_type):
         '''Create an XFR-related query message.
 
-        query_type is either SOA, AXFR or IXFR.  For type IXFR, it searches
-        the associated data source for the current SOA record to include
-        it in the query.  If the corresponding zone or the SOA record
-        cannot be found, it raises an XfrinException exception.  Note that
-        this may not necessarily a broken configuration; for the first attempt
-        of transfer the secondary may not have any boot-strap zone
-        information, in which case IXFR simply won't work.  The xfrin
-        should then fall back to AXFR.  _request_serial is recorded for
-        later use.
+        query_type is either SOA, AXFR or IXFR.  An IXFR query needs the
+        zone's current SOA record.  If it's not known, it raises an
+        XfrinException exception.  Note that this is not necessarily a
+        broken configuration; for the first transfer attempt the secondary
+        may not have any bootstrap zone information, in which case IXFR
+        simply won't work.  The xfrin should then fall back to AXFR.
+        _request_serial is recorded for later use.
 
         '''
         msg = Message(Message.RENDER)
@@ -582,27 +634,19 @@ class XfrinConnection(asyncore.dispatcher):
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
         msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+
+        # Remember our serial, if known
+        self._request_serial = get_soa_serial(self._zone_soa.get_rdata()[0]) \
+            if self._zone_soa is not None else None
+
+        # Set the authority section with our SOA for IXFR
         if query_type == RRType.IXFR():
-            # get the zone finder.  this must be SUCCESS (not even
-            # PARTIALMATCH) because we are specifying the zone origin name.
-            zone_soa_rr = self._get_zone_soa()
-            msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
-            self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
-        else:
-            # For AXFR, we temporarily provide backward compatible behavior
-            # where xfrin is responsible for creating zone in the corresponding
-            # DB table.  Note that the code below uses the old data source
-            # API and assumes SQLite3 in an ugly manner.  We'll have to
-            # develop a better way of managing zones in a generic way and
-            # eliminate the code like the one here.
-            try:
-                self._get_zone_soa()
-            except XfrinException:
-                def empty_rr_generator():
-                    return []
-                isc.datasrc.sqlite3_ds.load(self._db_file,
-                                            self._zone_name.to_text(),
-                                            empty_rr_generator)
+            if self._zone_soa is None:
+                # (incremental) IXFR doesn't work without a known SOA
+                raise XfrinException('Failed to create IXFR query due to no ' +
+                                     'SOA for ' + self.zone_str())
+            msg.add_rrset(Message.SECTION_AUTHORITY, self._zone_soa)
+
         return msg
 
     def _send_data(self, data):
@@ -656,7 +700,8 @@ class XfrinConnection(asyncore.dispatcher):
         if self._tsig_ctx is not None:
             tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
             if tsig_error != TSIGError.NOERROR:
-                raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+                raise XfrinProtocolError('TSIG verify fail: %s' %
+                                         str(tsig_error))
         elif tsig_record is not None:
             # If the response includes a TSIG while we didn't sign the query,
             # we treat it as an error.  RFC doesn't say anything about this
@@ -665,13 +710,78 @@ class XfrinConnection(asyncore.dispatcher):
             # implementation would return such a response, and since this is
             # part of security mechanism, it's probably better to be more
             # strict.
-            raise XfrinException('Unexpected TSIG in response')
+            raise XfrinProtocolError('Unexpected TSIG in response')
+
+    def __parse_soa_response(self, msg, response_data):
+        '''Parse a response to SOA query and extract the SOA from answer.
+
+        This is a subroutine of _check_soa_serial().  It also validates the
+        message and rejects bogus responses with XfrinProtocolError.
+
+        If everything is okay, it returns the SOA RR from the answer section
+        of the response.
+
+        '''
+        # Check TSIG integrity and validate the header.  Unlike AXFR/IXFR,
+        # we should be more strict for SOA queries and check the AA flag, too.
+        self._check_response_tsig(msg, response_data)
+        self._check_response_header(msg)
+        if not msg.get_header_flag(Message.HEADERFLAG_AA):
+            raise XfrinProtocolError('non-authoritative answer to SOA query')
+
+        # Validate the question section
+        n_question = msg.get_rr_count(Message.SECTION_QUESTION)
+        if n_question != 1:
+            raise XfrinProtocolError('Invalid response to SOA query: ' +
+                                     '(' + str(n_question) + ' questions, 1 ' +
+                                     'expected)')
+        resp_question = msg.get_question()[0]
+        if resp_question.get_name() != self._zone_name or \
+                resp_question.get_class() != self._rrclass or \
+                resp_question.get_type() != RRType.SOA():
+            raise XfrinProtocolError('Invalid response to SOA query: '
+                                     'question mismatch: ' +
+                                     str(resp_question))
+
+        # Look into the answer section for SOA
+        soa = None
+        for rr in msg.get_section(Message.SECTION_ANSWER):
+            if rr.get_type() == RRType.SOA():
+                if soa is not None:
+                    raise XfrinProtocolError('SOA response had multiple SOAs')
+                soa = rr
+            # There should not be a CNAME record at the top of the zone.
+            if rr.get_type() == RRType.CNAME():
+                raise XfrinProtocolError('SOA query resulted in CNAME')
+
+        # If SOA is not found, try to figure out the reason then report it.
+        if soa is None:
+            # See if we have any SOA records in the authority section.
+            for rr in msg.get_section(Message.SECTION_AUTHORITY):
+                if rr.get_type() == RRType.NS():
+                    raise XfrinProtocolError('SOA query resulted in referral')
+                if rr.get_type() == RRType.SOA():
+                    raise XfrinProtocolError('SOA query resulted in NODATA')
+            raise XfrinProtocolError('No SOA record found in response to ' +
+                                     'SOA query')
+
+        # Check if the SOA is really what we asked for
+        if soa.get_name() != self._zone_name or \
+                soa.get_class() != self._rrclass:
+            raise XfrinProtocolError("SOA response doesn't match query: " +
+                                     str(soa))
+
+        # All okay, return it
+        return soa
+
 
     def _check_soa_serial(self):
-        ''' Compare the soa serial, if soa serial in master is less than
-        the soa serial in local, Finish xfrin.
-        False: soa serial in master is less or equal to the local one.
-        True: soa serial in master is bigger
+        '''Send SOA query and compare the local and remote serials.
+
+        If we know our local serial and the remote serial isn't newer
+        than ours, we abort the session with XfrinZoneUptodate.
+        On success it returns XFRIN_OK for testing.  The caller won't use it.
+
         '''
 
         self._send_query(RRType.SOA())
@@ -679,18 +789,23 @@ class XfrinConnection(asyncore.dispatcher):
         msg_len = socket.htons(struct.unpack('H', data_len)[0])
         soa_response = self._get_request_response(msg_len)
         msg = Message(Message.PARSE)
-        msg.from_wire(soa_response)
+        msg.from_wire(soa_response, Message.PRESERVE_ORDER)
+
+        # Validate/parse the rest of the response, and extract the SOA
+        # from the answer section
+        soa = self.__parse_soa_response(msg, soa_response)
+
+        # Compare the serials; if ours isn't older, abort with XfrinZoneUptodate.
+        primary_serial = get_soa_serial(soa.get_rdata()[0])
+        if self._request_serial is not None and \
+                self._request_serial >= primary_serial:
+            if self._request_serial != primary_serial:
+                logger.info(XFRIN_ZONE_SERIAL_AHEAD, primary_serial,
+                            self.zone_str(),
+                            format_addrinfo(self._master_addrinfo),
+                            self._request_serial)
+            raise XfrinZoneUptodate
 
-        # TSIG related checks, including an unexpected signed response
-        self._check_response_tsig(msg, soa_response)
-
-        # perform some minimal level validation.  It's an open issue how
-        # strict we should be (see the comment in _check_response_header())
-        self._check_response_header(msg)
-
-        # TODO, need select soa record from data source then compare the two
-        # serial, current just return OK, since this function hasn't been used
-        # now.
         return XFRIN_OK
 
     def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
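_check_soa_serial() above reads the response using standard DNS-over-TCP framing: a 2-byte length prefix in network byte order, then the message itself.  A minimal, equivalent sketch of that framing using struct's '!H' network-order format (the same result as the unpack('H') + socket.htons() pair in the patch); recv_all() is a placeholder for 'read exactly n bytes':

import struct

def read_tcp_dns_message(recv_all):
    '''recv_all(n) must return exactly n bytes from the connection.'''
    (msg_len,) = struct.unpack('!H', recv_all(2))
    return recv_all(msg_len)

# Example with canned data instead of a real socket:
wire = [struct.pack('!H', 5) + b'hello']
def fake_recv_all(n):
    data, wire[0] = wire[0][:n], wire[0][n:]
    return data
assert read_tcp_dns_message(fake_recv_all) == b'hello'
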
@@ -701,22 +816,30 @@ class XfrinConnection(asyncore.dispatcher):
             self._request_type = request_type
             # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
             # to hardcode here.
-            request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+            req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
             if check_soa:
-                ret =  self._check_soa_serial()
-
-            if ret == XFRIN_OK:
-                logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
-                            self.zone_str())
-                self._send_query(self._request_type)
-                self.__state = XfrinInitialSOA()
-                self._handle_xfrin_responses()
-                logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
-                            self.zone_str())
-
-        except (XfrinException, XfrinProtocolError) as e:
-            logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
-                         self.zone_str(), str(e))
+                self._check_soa_serial()
+
+            logger.info(XFRIN_XFR_TRANSFER_STARTED, req_str, self.zone_str())
+            self._send_query(self._request_type)
+            self.__state = XfrinInitialSOA()
+            self._handle_xfrin_responses()
+            logger.info(XFRIN_XFR_TRANSFER_SUCCESS, req_str, self.zone_str())
+
+        except XfrinZoneUptodate:
+            # Eventually we'll probably have to treat this case as a trigger
+            # for trying another primary server, etc., but for now we treat it
+            # as "success".
+            pass
+        except XfrinProtocolError as e:
+            logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+                        self.zone_str(),
+                        format_addrinfo(self._master_addrinfo), str(e))
+            ret = XFRIN_FAIL
+        except XfrinException as e:
+            logger.error(XFRIN_XFR_TRANSFER_FAILURE, req_str,
+                         self.zone_str(),
+                         format_addrinfo(self._master_addrinfo), str(e))
             ret = XFRIN_FAIL
         except Exception as e:
             # Catching all possible exceptions like this is generally not a
@@ -727,7 +850,7 @@ class XfrinConnection(asyncore.dispatcher):
             # catch it here, but until then we need broadest coverage so that
             # we won't miss anything.
 
-            logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+            logger.error(XFRIN_XFR_OTHER_FAILURE, req_str,
                          self.zone_str(), str(e))
             ret = XFRIN_FAIL
         finally:
@@ -751,13 +874,14 @@ class XfrinConnection(asyncore.dispatcher):
 
         msg_rcode = msg.get_rcode()
         if msg_rcode != Rcode.NOERROR():
-            raise XfrinException('error response: %s' % msg_rcode.to_text())
+            raise XfrinProtocolError('error response: %s' %
+                                     msg_rcode.to_text())
 
         if not msg.get_header_flag(Message.HEADERFLAG_QR):
-            raise XfrinException('response is not a response')
+            raise XfrinProtocolError('response is not a response')
 
         if msg.get_qid() != self._query_id:
-            raise XfrinException('bad query id')
+            raise XfrinProtocolError('bad query id')
 
     def _check_response_status(self, msg):
         '''Check validation of xfr response. '''
@@ -765,7 +889,7 @@ class XfrinConnection(asyncore.dispatcher):
         self._check_response_header(msg)
 
         if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
-            raise XfrinException('query section count greater than 1')
+            raise XfrinProtocolError('query section count greater than 1')
 
     def _handle_xfrin_responses(self):
         read_next_msg = True
@@ -805,8 +929,8 @@ class XfrinConnection(asyncore.dispatcher):
         return False
 
 def __process_xfrin(server, zone_name, rrclass, db_file,
-                  shutdown_event, master_addrinfo, check_soa, tsig_key,
-                  request_type, conn_class):
+                    shutdown_event, master_addrinfo, check_soa, tsig_key,
+                    request_type, conn_class):
     conn = None
     exception = None
     ret = XFRIN_FAIL
@@ -837,11 +961,9 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
         while retry:
             retry = False
             conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
-                              shutdown_event, master_addrinfo, tsig_key)
+                              shutdown_event, master_addrinfo, db_file,
+                              tsig_key)
             conn.init_socket()
-            # XXX: We still need _db_file for temporary workaround in _create_query().
-            # This should be removed when we eliminate the need for the workaround.
-            conn._db_file = db_file
             ret = XFRIN_FAIL
             if conn.connect_to_master():
                 ret = conn.do_xfrin(check_soa, request_type)
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 86cdec3..5e182d8 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,18 +15,63 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrin messages python module.
 
+% XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created
+On starting an xfrin session, xfrin found that the zone to be
+transferred does not exist in the data source.  This can happen if a
+secondary DNS server first tries to perform AXFR from a primary server
+without creating the zone image beforehand (e.g. by b10-loadzone).  As
+of this writing the xfrin process provides behavior backward compatible
+with previous versions: it creates a new, empty zone in the data source
+so as not to surprise existing users too much.  This is probably not a
+good idea, however, in terms of who should be responsible for managing
+zones at a higher level.  In the future a separate zone management
+framework will more likely be provided, and the situation where the
+given zone isn't found by xfrin will be treated as an error.
+
+% XFRIN_ZONE_NO_SOA Zone %1 does not have SOA
+On starting an xfrin session, xfrin found that the zone to be
+transferred does not have an SOA RR in the data source.  This is not
+necessarily an error; if a secondary DNS server first tries to perform
+a transfer from a primary server, the zone can be empty and therefore
+have no SOA.  A subsequent AXFR will fill in the zone; if the attempt
+is IXFR, it will fail at query creation.
+
+% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
+On starting an xfrin session, xfrin found that the zone to be
+transferred has multiple SOA RRs.  Such a zone is broken, but could be
+accidentally configured, especially in a data source using a "non
+captive" backend database.  The implementation ignores all the SOA RRs
+and tries to continue processing as if the zone were empty.  This
+means a subsequent AXFR can succeed and possibly replace the zone with
+valid content, but an IXFR attempt will fail.
+
+% XFRIN_ZONE_SERIAL_AHEAD Serial number (%1) for %2 received from master %3 < ours (%4)
+The response to an SOA query prior to xfr indicated that the zone's
+SOA serial at the primary server is smaller than that of the xfrin
+client.  This is not necessarily an error, especially if that
+particular primary server is another secondary server which hasn't got
+the latest version of the zone.  But if the primary server is known to
+be the real source of the zone, some unexpected inconsistency may have
+happened, and you may want to take a closer look.  In this case xfrin
+doesn't perform the subsequent zone transfer.
+
 % XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
 The XFR transfer for the given zone has failed due to a problem outside
 of the xfrin module.  Possible reasons are a broken DNS message or failure
 in database connection.  The error is shown in the log message.
 
-% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.  Note: due to the code structure
-this can only happen for AXFR.
-
-% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
-The XFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to a protocol
+error, such as an unexpected response from the primary server.  The
+error is shown in the log message.  It may be because the primary
+server implementation is broken or (although less likely) there was
+some attack attempt, but it can also happen due to a configuration
+mismatch, such as when the remote server no longer has authority for
+the zone but the local configuration hasn't been updated.  So it is
+recommended to check the primary server configuration.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to an internal error.
 The error is shown in the log message.
 
 % XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
@@ -118,6 +163,16 @@ daemon will now shut down.
 An uncaught exception was raised while running the xfrin daemon. The
 exception message is printed in the log message.
 
+% XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating
+The first SOA record in an IXFR response indicates the zone's serial
+at the primary server is not newer than the client's.  This is
+basically an unexpected event, because normally the client first checks
+the SOA serial with an SOA query, but it can still happen if the
+transfer is manually invoked or (although unlikely) there is a rapid
+change at the primary server between the SOA and IXFR queries.  The
+client implementation confirms that the whole response consists of this
+single SOA, and aborts the transfer just as in a successful case.
+
 % XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
 In an attempt of IXFR processing, the beginning SOA of the first difference
 (following the initial SOA that specified the final SOA for all the
diff --git a/src/bin/xfrout/b10-xfrout.8 b/src/bin/xfrout/b10-xfrout.8
index c8b4b07..c810c2f 100644
--- a/src/bin/xfrout/b10-xfrout.8
+++ b/src/bin/xfrout/b10-xfrout.8
@@ -71,6 +71,19 @@ The configurable settings are:
 defines the maximum number of outgoing zone transfers that can run concurrently\&. The default is 10\&.
 .PP
 
+\fItsig_key_ring\fR
+A list of TSIG keys (each of which is in the form of name:base64\-key[:algorithm]) used for access control on transfer requests\&. The default is an empty list\&.
+.PP
+
+\fItransfer_acl\fR
+A list of ACL elements that apply to all transfer requests by default (unless overridden in zone_config)\&. See the BIND 10 guide for configuration examples\&. The default is an element that allows any transfer requests\&.
+.PP
+
+\fIzone_config\fR
+A list of JSON objects (i\&.e\&. maps) that define per\-zone configuration concerning
+\fBb10\-xfrout\fR\&. The supported names of each object are "origin" (the origin name of the zone), "class" (the RR class of the zone, optional, defaulting to "IN"), and "acl_element" (an ACL that applies only to transfer requests for that zone)\&. See the BIND 10 guide for configuration examples\&. The default is an empty list, that is, no zone\-specific configuration\&.
+.PP
+
 \fIlog_name\fR
 .PP
 
diff --git a/src/bin/xfrout/b10-xfrout.xml b/src/bin/xfrout/b10-xfrout.xml
index 9889b80..4f6a7fa 100644
--- a/src/bin/xfrout/b10-xfrout.xml
+++ b/src/bin/xfrout/b10-xfrout.xml
@@ -98,6 +98,31 @@
       that can run concurrently. The default is 10.
     </para>
     <para>
+      <varname>tsig_key_ring</varname>
+      A list of TSIG keys (each of which is in the form of
+      name:base64-key[:algorithm]) used for access control on transfer
+      requests.
+      The default is an empty list.
+    </para>
+    <para>
+      <varname>transfer_acl</varname>
+      A list of ACL elements that apply to all transfer requests by
+      default (unless overridden in zone_config).  See the BIND 10
+      guide for configuration examples.
+      The default is an element that allows any transfer requests.
+    </para>
+    <para>
+      <varname>zone_config</varname>
+      A list of JSON objects (i.e. maps) that define per-zone
+      configuration concerning <command>b10-xfrout</command>.
+      The supported names of each object are "origin" (the origin
+      name of the zone), "class" (the RR class of the zone, optional,
+      defaulting to "IN"), and "acl_element" (an ACL that applies only
+      to transfer requests for that zone).
+      See the BIND 10 guide for configuration examples.
+      The default is an empty list, that is, no zone-specific configuration.
+    </para>
+    <para>
       <varname>log_name</varname>
 <!-- TODO -->
     </para>
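As a concrete illustration of the tsig_key_ring/transfer_acl/zone_config settings documented above, here is a hypothetical configuration fragment written as a Python/JSON-style structure.  The key names follow the documentation; the key data, network and zone values are made up, and the exact ACL element syntax is described in the BIND 10 guide:

xfrout_config = {
    "tsig_key_ring": ["example.key:SGVsbG8gd29ybGQ=:hmac-sha256"],
    "transfer_acl": [{"action": "ACCEPT", "from": "192.0.2.0/24"}],
    "zone_config": [
        {"origin": "example.com",
         "class": "IN",
         "acl_element": {"action": "ACCEPT", "key": "example.key"}}
    ]
}
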
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index 509df79..ad6d7e6 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -3,8 +3,8 @@ PYTESTS = xfrout_test.py
 noinst_SCRIPTS = $(PYTESTS)
 
 EXTRA_DIST = testdata/test.sqlite3
-# This one is actually not necessary, but added for reference
-EXTRA_DIST += testdata/example.com
+# These are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/creatediff.py
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
diff --git a/src/bin/xfrout/tests/testdata/creatediff.py b/src/bin/xfrout/tests/testdata/creatediff.py
new file mode 100755
index 0000000..dab6622
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/creatediff.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3.1
+
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''This script was used to create zone differences for IXFR tests.
+
+The result was stored in the test SQLite3 database file, so this script
+itself isn't necessary for testing.  It's provided here for reference
+purposes.
+
+'''
+
+import isc.datasrc
+import isc.log
+from isc.dns import *
+from isc.testutils.rrset_utils import *
+
+isc.log.init("dummy")           # XXX
+
+ZONE_NAME = Name('example.com')
+NS_NAME_STR = 'a.dns.example.com'
+NS_NAME = Name(NS_NAME_STR)
+
+client = isc.datasrc.DataSourceClient('sqlite3',
+                                      '{ "database_file": "test.sqlite3" }')
+
+# Install the initial data
+updater = client.get_updater(ZONE_NAME, True)
+updater.add_rrset(create_soa(2011111802))
+updater.add_rrset(create_ns(NS_NAME_STR))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.add_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.commit()
+
+# Incremental update to generate diffs
+updater = client.get_updater(ZONE_NAME, False, True)
+updater.delete_rrset(create_soa(2011111802))
+updater.add_rrset(create_soa(2011111900))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.2', 7200))
+updater.delete_rrset(create_soa(2011111900))
+updater.delete_rrset(create_a(NS_NAME, '192.0.2.53'))
+updater.delete_rrset(create_aaaa(NS_NAME, '2001:db8::1'))
+updater.add_rrset(create_soa(2011112001))
+updater.add_rrset(create_a(NS_NAME, '192.0.2.1'))
+updater.commit()
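For reference, the incremental update above records two consecutive diffs
in the zone's journal: 2011111802->2011111900 (adding the 7200-TTL A
record for a.dns.example.com) and 2011111900->2011112001 (removing the
192.0.2.53 A and the AAAA records and adding the 192.0.2.1 A record).
test_ixfr_normal_session in xfrout_test.py.in below expects exactly this
sequence, bracketed by the beginning and trailing SOAs.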
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
index 25c5e6a..8458d09 100644
--- a/src/bin/xfrout/tests/testdata/example.com
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -1,6 +1,6 @@
 ;; This is the source of a zone stored in test.sqlite3.  It's provided
 ;; for reference purposes only.
-example.com.         3600  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com.         3600  IN  SOA master.example.com. admin.example.com. 2011112001 3600 1800 2419200 7200
 example.com.         3600  IN  NS  a.dns.example.com.
 a.dns.example.com.   3600  IN  A    192.0.2.1
 a.dns.example.com.   7200  IN  A    192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
index af491f5..9eb14f1 100644
Binary files a/src/bin/xfrout/tests/testdata/test.sqlite3 and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
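The tests below rely on helpers imported from isc.testutils.rrset_utils
(create_soa(), create_ns(), create_a(), create_aaaa(), create_generic()
and rrsets_equal()), which are added elsewhere on this branch and are not
shown in this diff.  As a rough sketch only (the signature is inferred
from the calls in this file, not taken from the real module), create_soa()
can be thought of as:

    # Hypothetical sketch of isc.testutils.rrset_utils.create_soa(); the
    # real helper is outside this diff.  The owner name and the non-serial
    # SOA fields match testdata/example.com.
    from isc.dns import Name, RRClass, RRType, RRTTL, RRset, Rdata

    def create_soa(serial, name=Name('example.com'), ttl=3600):
        soa = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
                            'master.example.com. admin.example.com. ' +
                            str(serial) + ' 3600 1800 2419200 7200'))
        return soa

rrsets_equal() presumably compares the owner name, type, class, TTL and
RDATA of two RRsets.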
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 8394b0a..ea4de27 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -22,6 +22,7 @@ from isc.testutils.tsigctx_mock import MockTSIGContext
 from isc.cc.session import *
 import isc.config
 from isc.dns import *
+from isc.testutils.rrset_utils import *
 from xfrout import *
 import xfrout
 import isc.log
@@ -30,6 +31,16 @@ import isc.acl.dns
 TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
 
+#
+# Commonly used (mostly constant) test parameters
+#
+TEST_ZONE_NAME_STR = "example.com."
+TEST_ZONE_NAME = Name(TEST_ZONE_NAME_STR)
+TEST_RRCLASS = RRClass.IN()
+IXFR_OK_VERSION = 2011111802
+IXFR_NG_VERSION = 2011112800
+SOA_CURRENT_VERSION = 2011112001
+
 # our fake socket, where we can read and insert messages
 class MySocket():
     def __init__(self, family, type):
@@ -56,10 +67,12 @@ class MySocket():
         self.sendqueue = self.sendqueue[size:]
         return result
 
-    def read_msg(self, parse_options=Message.PARSE_DEFAULT):
+    def read_msg(self, parse_options=Message.PARSE_DEFAULT, need_len=False):
         sent_data = self.readsent()
         get_msg = Message(Message.PARSE)
         get_msg.from_wire(bytes(sent_data[2:]), parse_options)
+        if need_len:
+            return (get_msg, len(sent_data) - 2)
         return get_msg
 
     def clear_send(self):
@@ -69,6 +82,38 @@ class MockDataSrcClient:
     def __init__(self, type, config):
         pass
 
+    def find_zone(self, zone_name):
+        '''Mock version of find_zone().
+
+        It returns itself (subsequently acting as a mock ZoneFinder) for
+        some test zone names.  For a special name it returns NOTFOUND to
+        emulate the condition where the specified zone doesn't exist.
+
+        '''
+        self._zone_name = zone_name
+        if zone_name == Name('notauth.example.com'):
+            return (isc.datasrc.DataSourceClient.NOTFOUND, None)
+        return (isc.datasrc.DataSourceClient.SUCCESS, self)
+
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
+        '''Mock ZoneFinder.find().
+
+        (At the moment) this method only handles queries for type SOA.
+        By default it returns a normal SOA RR(set) whose owner name is
+        the query name.  It also emulates some unusual cases for special
+        zone names.
+
+        '''
+        if name == Name('nosoa.example.com') and rrtype == RRType.SOA():
+            return (ZoneFinder.NXDOMAIN, None)
+        elif name == Name('multisoa.example.com') and rrtype == RRType.SOA():
+            soa_rrset = create_soa(SOA_CURRENT_VERSION)
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
+            return (ZoneFinder.SUCCESS, soa_rrset)
+        elif rrtype == RRType.SOA():
+            return (ZoneFinder.SUCCESS, create_soa(SOA_CURRENT_VERSION))
+        raise ValueError('Unexpected input to mock finder: bug in test case?')
+
     def get_iterator(self, zone_name, adjust_ttl=False):
         if zone_name == Name('notauth.example.com'):
             raise isc.datasrc.Error('no such zone')
@@ -78,19 +123,20 @@ class MockDataSrcClient:
     def get_soa(self):  # emulate ZoneIterator.get_soa()
         if self._zone_name == Name('nosoa.example.com'):
             return None
-        soa_rrset = RRset(self._zone_name, RRClass.IN(), RRType.SOA(),
-                          RRTTL(3600))
-        soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                  'master.example.com. ' +
-                                  'admin.example.com. 1234 ' +
-                                  '3600 1800 2419200 7200'))
+        soa_rrset = create_soa(SOA_CURRENT_VERSION)
         if self._zone_name == Name('multisoa.example.com'):
-            soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                      'master.example.com. ' +
-                                      'admin.example.com. 1300 ' +
-                                      '3600 1800 2419200 7200'))
+            soa_rrset.add_rdata(soa_rrset.get_rdata()[0])
         return soa_rrset
 
+    def get_journal_reader(self, zone_name, begin_serial, end_serial):
+        if zone_name == Name('notauth2.example.com'):
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_ZONE, None
+        if zone_name == Name('nojournal.example.com'):
+            raise isc.datasrc.NotImplemented('journaling not supported')
+        if begin_serial == IXFR_NG_VERSION:
+            return isc.datasrc.ZoneJournalReader.NO_SUCH_VERSION, None
+        return isc.datasrc.ZoneJournalReader.SUCCESS, self
+
 class MyCCSession(isc.config.ConfigData):
     def __init__(self):
         module_spec = isc.config.module_spec_from_file(
@@ -159,15 +205,44 @@ class TestXfroutSessionBase(unittest.TestCase):
     def message_has_tsig(self, msg):
         return msg.get_tsig_record() is not None
 
-    def create_request_data(self, with_question=True, with_tsig=False):
+    def create_request_data(self, with_question=True, with_tsig=False,
+                            ixfr=None, qtype=None, zone_name=TEST_ZONE_NAME,
+                            soa_class=TEST_RRCLASS, num_soa=1):
+        '''Create commonly used XFR request data.
+
+        By default the request type is AXFR; if 'ixfr' is an integer,
+        the request type will be IXFR, and an SOA whose serial is the
+        value of the parameter will be included in the authority
+        section.
+
+        This method has various minor parameters that are only used for
+        creating malformed requests for testing purposes:
+        qtype: the RR type of the question section.  By default automatically
+               determined by the value of ixfr, but could be an invalid type
+               for testing.
+        zone_name: the query (zone) name.  For IXFR, it's also used as
+                   the owner name of the SOA in the authority section.
+        soa_class: IXFR only.  The RR class of the SOA RR in the authority
+                   section.
+        num_soa: IXFR only.  The number of SOA RDATAs in the authority
+                 section.
+        '''
         msg = Message(Message.RENDER)
         query_id = 0x1035
         msg.set_qid(query_id)
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
+        req_type = RRType.AXFR() if ixfr is None else RRType.IXFR()
         if with_question:
-            msg.add_question(Question(Name("example.com"), RRClass.IN(),
-                                      RRType.AXFR()))
+            msg.add_question(Question(zone_name, RRClass.IN(),
+                                      req_type if qtype is None else qtype))
+        if req_type == RRType.IXFR():
+            soa = RRset(zone_name, soa_class, RRType.SOA(), RRTTL(0))
+            # In the RDATA only the serial matters.
+            for i in range(0, num_soa):
+                soa.add_rdata(Rdata(RRType.SOA(), soa_class,
+                                    'm r ' + str(ixfr) + ' 1 1 1 1'))
+            msg.add_rrset(Message.SECTION_AUTHORITY, soa)
 
         renderer = MessageRenderer()
         if with_tsig:
@@ -178,6 +253,13 @@ class TestXfroutSessionBase(unittest.TestCase):
         request_data = renderer.get_data()
         return request_data
 
+    def set_request_type(self, type):
+        self.xfrsess._request_type = type
+        if type == RRType.AXFR():
+            self.xfrsess._request_typestr = 'AXFR'
+        else:
+            self.xfrsess._request_typestr = 'IXFR'
+
     def setUp(self):
         self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
@@ -188,13 +270,9 @@ class TestXfroutSessionBase(unittest.TestCase):
                                        isc.acl.dns.REQUEST_LOADER.load(
                                            [{"action": "ACCEPT"}]),
                                        {})
+        self.set_request_type(RRType.AXFR()) # test AXFR by default
         self.mdata = self.create_request_data()
-        self.soa_rrset = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(),
-                               RRTTL(3600))
-        self.soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                                       'master.Example.com. ' +
-                                       'admin.exAmple.com. ' +
-                                       '1234 3600 1800 2419200 7200'))
+        self.soa_rrset = create_soa(SOA_CURRENT_VERSION)
         # some test replaces a module-wide function.  We should ensure the
         # original is used elsewhere.
         self.orig_get_rrset_len = xfrout.get_rrset_len
@@ -222,7 +300,7 @@ class TestXfroutSession(TestXfroutSessionBase):
         # set up a bogus request, which should result in FORMERR. (it only
         # has to be something that is different from the previous case)
         self.xfrsess._request_data = \
-            self.create_request_data(with_question=False)
+            self.create_request_data(ixfr=IXFR_OK_VERSION, num_soa=2)
         # Replace the data source client to avoid datasrc related exceptions
         self.xfrsess.ClientClass = MockDataSrcClient
         XfroutSession._handle(self.xfrsess)
@@ -241,13 +319,29 @@ class TestXfroutSession(TestXfroutSessionBase):
         XfroutSession._handle(self.xfrsess)
 
     def test_parse_query_message(self):
+        # Valid AXFR
         [get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
+        self.assertEqual(RRType.AXFR(), self.xfrsess._request_type)
         self.assertEqual(get_rcode.to_text(), "NOERROR")
 
+        # Valid IXFR
+        request_data = self.create_request_data(ixfr=2011111801)
+        rcode, msg = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(RRType.IXFR(), self.xfrsess._request_type)
+        self.assertEqual(Rcode.NOERROR(), rcode)
+
         # Broken request: no question
-        request_data = self.create_request_data(with_question=False)
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(with_question=False))
+
+        # Broken request: invalid RR type (neither AXFR nor IXFR)
+        self.assertRaises(RuntimeError, self.xfrsess._parse_query_message,
+                          self.create_request_data(qtype=RRType.A()))
+
+        # Valid IXFR request; parsing should succeed with NOERROR
+        request_data = self.create_request_data(ixfr=IXFR_OK_VERSION)
         rcode, msg = self.xfrsess._parse_query_message(request_data)
-        self.assertEqual(Rcode.FORMERR(), rcode)
+        self.assertEqual(rcode.to_text(), "NOERROR")
 
         # tsig signed query message
         request_data = self.create_request_data(with_tsig=True)
@@ -436,7 +530,7 @@ class TestXfroutSession(TestXfroutSessionBase):
                                RRTTL(3600))
         soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
                                   'master.Example.com. admin.exAmple.com. ' +
-                                  '1234 3600 1800 2419200 7200'))
+                                  '2011112001 3600 1800 2419200 7200'))
         msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
         self.xfrsess._send_message(self.sock, msg)
         send_out_data = self.sock.readsent()[2:]
@@ -470,34 +564,28 @@ class TestXfroutSession(TestXfroutSessionBase):
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 packet_neet_not_sign)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, section.ANSWER())
-        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]#answer_rrset_iter.get_rrset()
+        answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
         self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
+        # Send the message together with the last SOA
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 TSIG_SIGN_EVERY_NTH)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
-        # tsig context is not exist
+        # tsig context does not exist
         self.assertFalse(self.message_has_tsig(get_msg))
 
     def test_send_message_with_last_soa_with_tsig(self):
@@ -507,13 +595,9 @@ class TestXfroutSession(TestXfroutSessionBase):
         msg = self.getmsg()
         msg.make_response()
 
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
-        # msg is not the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
+        # Send the message together with the last SOA
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 packet_neet_not_sign)
+                                                 self.soa_rrset, 0)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
 
@@ -521,14 +605,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        # msg is the TSIG_SIGN_EVERY_NTH one
-        # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset, 0,
-                                                 TSIG_SIGN_EVERY_NTH)
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-
     def test_trigger_send_message_with_last_soa(self):
         rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
         rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
@@ -540,8 +616,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         # length larger than MAX-len(rrset)
         length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
             get_rrset_len(self.soa_rrset) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
@@ -549,8 +623,7 @@ class TestXfroutSession(TestXfroutSessionBase):
         # the sending in _with_last_soa)
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
                                                  self.soa_rrset,
-                                                 length_need_split,
-                                                 packet_neet_not_sign)
+                                                 length_need_split)
         get_msg = self.sock.read_msg()
         self.assertFalse(self.message_has_tsig(get_msg))
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_QUESTION), 1)
@@ -570,7 +643,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_ANSWER), 1)
         self.assertEqual(get_msg.get_rr_count(Message.SECTION_AUTHORITY), 0)
 
-        #answer_rrset_iter = section_iter(get_msg, Message.SECTION_ANSWER)
         answer = get_msg.get_section(Message.SECTION_ANSWER)[0]
         self.assertEqual(answer.get_name().to_text(), "example.com.")
         self.assertEqual(answer.get_class(), RRClass("IN"))
@@ -590,8 +662,6 @@ class TestXfroutSession(TestXfroutSessionBase):
         # length larger than MAX-len(rrset)
         length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
             get_rrset_len(self.soa_rrset) + 1
-        # packet number less than TSIG_SIGN_EVERY_NTH
-        packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
         # give the function a value that is larger than MAX-len(rrset)
         # this should have triggered the sending of two messages
@@ -599,26 +669,10 @@ class TestXfroutSession(TestXfroutSessionBase):
         # the sending in _with_last_soa)
         self.xfrsess._send_message_with_last_soa(msg, self.sock,
                                                  self.soa_rrset,
-                                                 length_need_split,
-                                                 packet_neet_not_sign)
-        get_msg = self.sock.read_msg()
-        # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
-        self.assertFalse(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
+                                                 length_need_split)
+        # Both messages should have TSIG RRs
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
-        # and it should not have sent anything else
-        self.assertEqual(0, len(self.sock.sendqueue))
-
-
-        # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
-        self.xfrsess._send_message_with_last_soa(msg, self.sock,
-                                                 self.soa_rrset,
-                                                 length_need_split,
-                                                 xfrout.TSIG_SIGN_EVERY_NTH)
-        get_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(get_msg))
-        # the last packet should be tsig signed
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
         # and it should not have sent anything else
@@ -627,16 +681,101 @@ class TestXfroutSession(TestXfroutSessionBase):
     def test_get_rrset_len(self):
         self.assertEqual(82, get_rrset_len(self.soa_rrset))
 
-    def test_check_xfrout_available(self):
+    def test_xfrout_axfr_setup(self):
+        self.xfrsess.ClientClass = MockDataSrcClient
+        # Successful case.  A zone iterator should be set up.
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('notauth.example.com'), TEST_RRCLASS),
+                         Rcode.NOTAUTH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('nosoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), Name('multisoa.example.com'), TEST_RRCLASS),
+                         Rcode.SERVFAIL())
+
+    def test_xfrout_ixfr_setup(self):
         self.xfrsess.ClientClass = MockDataSrcClient
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('example.com')), Rcode.NOERROR())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('notauth.example.com')), Rcode.NOTAUTH())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('nosoa.example.com')), Rcode.SERVFAIL())
-        self.assertEqual(self.xfrsess._check_xfrout_available(
-                Name('multisoa.example.com')), Rcode.SERVFAIL())
+        self.set_request_type(RRType.IXFR())
+
+        # Successful case of pure IXFR.  A zone journal reader should be set
+        # up.
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but as a result of falling back to AXFR-style
+        # IXFR.  A zone iterator should be set up instead of a journal reader.
+        self.mdata = self.create_request_data(ixfr=IXFR_NG_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # Successful case, but the requested SOA serial is equal to that of
+        # the local SOA.  Both iterator and jnl_reader should be None,
+        # indicating that the response will contain just one SOA.
+        self.mdata = self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), TEST_ZONE_NAME, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertEqual(None, self.xfrsess._iterator)
+        self.assertEqual(None, self.xfrsess._jnl_reader)
+
+        # The data source doesn't support journaling.  Should fallback to AXFR.
+        zone_name = Name('nojournal.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOERROR())
+        self.assertNotEqual(None, self.xfrsess._iterator)
+
+        # Failure cases
+        zone_name = Name('notauth.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        # this is a strange case: the zone's SOA will be found but the journal
+        # reader won't be created due to 'no such zone'.
+        zone_name = Name('notauth2.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.NOTAUTH())
+        zone_name = Name('nosoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+        zone_name = Name('multisoa.example.com')
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              zone_name=zone_name)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.SERVFAIL())
+
+        # query name doesn't match the SOA's owner
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # query's RR class doesn't match the SOA's class
+        zone_name = TEST_ZONE_NAME # make sure the name matches this time
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              soa_class=RRClass.CH())
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
+
+        # multiple SOA RRs
+        self.mdata = self.create_request_data(ixfr=IXFR_OK_VERSION,
+                                              num_soa=2)
+        self.assertEqual(self.xfrsess._xfrout_setup(
+                self.getmsg(), zone_name, TEST_RRCLASS), Rcode.FORMERR())
 
     def test_dns_xfrout_start_formerror(self):
         # formerror
@@ -644,13 +783,10 @@ class TestXfroutSession(TestXfroutSessionBase):
         sent_data = self.sock.readsent()
         self.assertEqual(len(sent_data), 0)
 
-    def default(self, param):
-        return "example.com"
-
     def test_dns_xfrout_start_notauth(self):
-        def notauth(formpara):
+        def notauth(msg, name, rrclass):
             return Rcode.NOTAUTH()
-        self.xfrsess._check_xfrout_available = notauth
+        self.xfrsess._xfrout_setup = notauth
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         get_msg = self.sock.read_msg()
         self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
@@ -663,9 +799,9 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
 
     def test_dns_xfrout_start_noerror(self):
-        def noerror(form):
+        def noerror(msg, name, rrclass):
             return Rcode.NOERROR()
-        self.xfrsess._check_xfrout_available = noerror
+        self.xfrsess._xfrout_setup = noerror
 
         def myreply(msg, sock):
             self.sock.send(b"success")
@@ -674,14 +810,14 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
         self.assertEqual(self.sock.readsent(), b"success")
 
-    def test_reply_xfrout_query_noerror(self):
+    def test_reply_xfrout_query_axfr(self):
         self.xfrsess._soa = self.soa_rrset
         self.xfrsess._iterator = [self.soa_rrset]
         self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
         reply_msg = self.sock.read_msg()
         self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
 
-    def test_reply_xfrout_query_noerror_with_tsig(self):
+    def test_reply_xfrout_query_axfr_with_tsig(self):
         rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
                       RRTTL(3600))
         rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
@@ -697,28 +833,195 @@ class TestXfroutSession(TestXfroutSessionBase):
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
         self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
 
-        # tsig signed first package
-        reply_msg = self.sock.read_msg()
-        self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
-        self.assertTrue(self.message_has_tsig(reply_msg))
-        # (TSIG_SIGN_EVERY_NTH - 1) packets have no tsig
-        for i in range(0, xfrout.TSIG_SIGN_EVERY_NTH - 1):
+        # All messages must have TSIG as we don't support the feature of
+        # skipping intermediate TSIG records (with bulk signing).
+        for i in range(0, 102): # 102 = all 100 RRs from iterator and 2 SOAs
             reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # TSIG_SIGN_EVERY_NTH packet has tsig
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
-
-        for i in range(0, 100 - TSIG_SIGN_EVERY_NTH):
-            reply_msg = self.sock.read_msg()
-            self.assertFalse(self.message_has_tsig(reply_msg))
-        # tsig signed last package
-        reply_msg = self.sock.read_msg()
-        self.assertTrue(self.message_has_tsig(reply_msg))
+            # With the hack of get_rrset_len() above, every message must have
+            # exactly one RR in the answer section.
+            self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 1)
+            self.assertTrue(self.message_has_tsig(reply_msg))
 
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
+    def test_reply_xfrout_query_ixfr(self):
+        # Creating a pure (incremental) IXFR response.  Intermediate SOA
+        # RRs won't be skipped.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = [create_soa(IXFR_OK_VERSION),
+                                  create_a(Name('a.example.com'), '192.0.2.2'),
+                                  create_soa(SOA_CURRENT_VERSION),
+                                  create_aaaa(Name('a.example.com'),
+                                              '2001:db8::1')]
+        self.xfrsess._jnl_reader = self.xfrsess._iterator
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        actual_records = reply_msg.get_section(Message.SECTION_ANSWER)
+
+        expected_records = self.xfrsess._iterator[:]
+        expected_records.insert(0, create_soa(SOA_CURRENT_VERSION))
+        expected_records.append(create_soa(SOA_CURRENT_VERSION))
+
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_reply_xfrout_query_axfr_maxlen(self):
+        # The test RR(set) has a length of 65535 - 12 (header size) bytes:
+        # owner name = 1 (root), fixed fields (type,class,TTL,RDLEN) = 10
+        # RDATA = 65512 (= 65535 - 12 - 1 - 10)
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65512)
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        # The first message should contain the beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The second message should contain the large test RR, and only that RR.
+        # Its wire format data should have the maximum possible size.
+        r, rlen = self.sock.read_msg(need_len=True)
+        self.assertEqual(65535, rlen)
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(test_rr,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The third message should contain the ending SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def maxlen_test_common_setup(self, tsig=False):
+        '''Common initialization for some of the tests below
+
+        For those tests we use '.' for all owner names and names in RDATA
+        to avoid having unexpected results due to compression.  It returns
+        the created SOA for convenience.
+
+        If tsig is True, also setup TSIG (mock) context.  In our test cases
+        the size of the TSIG RR is 81 bytes (key name = example.com,
+        algorithm = hmac-md5)
+
+        '''
+        soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+        self.mdata = self.create_request_data(zone_name=Name('.'))
+        self.xfrsess._soa = soa
+        if tsig:
+            self.xfrsess._tsig_ctx = \
+                self.create_mock_tsig_ctx(TSIGError.NOERROR)
+            self.xfrsess._tsig_len = 81
+        return soa
+
+    def maxlen_test_common_checks(self, soa_rr, test_rr, expected_n_rr):
+        '''A set of common assertion checks for some tests below.
+
+        In all cases two AXFR response messages should have been created.
+        expected_n_rr is a list of two elements, each specifies the expected
+        number of answer RRs for each message: expected_n_rr[0] is the expected
+        number of the first answer RRs; expected_n_rr[1] is the expected number
+        of the second answer RRs.  The message that contains two RRs should
+        have the maximum possible wire length (65535 bytes).  And, in all
+        cases, the resulting RRs should be in the order of SOA, another RR,
+        SOA.
+
+        '''
+        # Check the first message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[0] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[0],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs = r.get_section(Message.SECTION_ANSWER)[:]
+
+        # Check the second message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[1] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[1],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs.extend(r.get_section(Message.SECTION_ANSWER))
+        for (expected_rr, actual_rr) in zip([soa_rr, test_rr, soa_rr],
+                                            actual_rrs):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa(self):
+        # Similar to the 'maxlen' test, but the first message should be
+        # able to contain both SOA and the large RR.
+        soa = self.maxlen_test_common_setup()
+
+        # The first message will contain the question (5 bytes), so the
+        # test RDATA should leave room for that.
+        test_rr = create_generic(Name('.'), 65512 - 5 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa_with_tsig(self):
+        # Similar to the previous case, but with TSIG (whose size is 81 bytes).
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 5 - 81 -
+                                 get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa(self):
+        # Similar to the max w/ soa test, but the first message cannot contain
+        # both SOA and the long RR due to the question section.  The second
+        # message should be able to contain both.
+        soa = self.maxlen_test_common_setup()
+        test_rr = create_generic(Name('.'), 65512 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa_with_tsig(self):
+        # Similar to the previous case, but with TSIG.
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 81 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_toobigdata(self):
+        # Similar to the 'maxlen' test, but the RR doesn't even fit in a
+        # single message.
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65513) # 1 byte larger than 'max'
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        # the reply method should fail with exception
+        self.assertRaises(XfroutSessionError, self.xfrsess._reply_xfrout_query,
+                          self.getmsg(), self.sock)
+        # The first message should still have been sent and contain the
+        # beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # And there should have been no other messages sent
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def test_reply_xfrout_query_ixfr_soa_only(self):
+        # Creating an IXFR response that contains only one RR, which is the
+        # SOA of the current version.
+        self.xfrsess._soa = create_soa(SOA_CURRENT_VERSION)
+        self.xfrsess._iterator = None
+        self.xfrsess._jnl_reader = None
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
+        answer = reply_msg.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answer))
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answer[0]))
 
 class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
     '''Tests for XFR-out sessions using an SQLite3 DB.
@@ -734,19 +1037,82 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         self.xfrsess._request_data = self.mdata
         self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
             'test.sqlite3'
+        self.ns_name = 'a.dns.example.com'
+
+    def check_axfr_stream(self, response):
+        '''Common checks for AXFR(-style) response for the test zone.
+        '''
+        # This zone contains two A RRs for the same name with different TTLs.
+        # These TTLs should be preserved in the AXFR stream.
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(5, len(actual_records))
+        # The first and last RR should be the expected SOA
+        expected_soa = create_soa(2011112001)
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[0]))
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[-1]))
+
+        # The ordering of the intermediate RRs can differ depending on the
+        # internal details of the SQLite3 library, so we sort them by a simple
+        # rule sufficient for the purpose here, and then compare them.
+        expected_others = [create_ns(self.ns_name),
+                           create_a(Name(self.ns_name), '192.0.2.1', 3600),
+                           create_a(Name(self.ns_name), '192.0.2.2', 7200)]
+        keyfn = lambda x: (x.get_type(), x.get_ttl())
+        for (expected_rr, actual_rr) in zip(sorted(expected_others, key=keyfn),
+                                            sorted(actual_records[1:4],
+                                                   key=keyfn)):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
 
     def test_axfr_normal_session(self):
         XfroutSession._handle(self.xfrsess)
         response = self.sock.read_msg(Message.PRESERVE_ORDER);
         self.assertEqual(Rcode.NOERROR(), response.get_rcode())
-        # This zone contains two A RRs for the same name with different TTLs.
-        # These TTLs should be preseved in the AXFR stream.
-        actual_ttls = []
-        for rr in response.get_section(Message.SECTION_ANSWER):
-            if rr.get_type() == RRType.A() and \
-                    not rr.get_ttl() in actual_ttls:
-                actual_ttls.append(rr.get_ttl().get_value())
-        self.assertEqual([3600, 7200], sorted(actual_ttls))
+        self.check_axfr_stream(response)
+
+    def test_ixfr_to_axfr(self):
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_NG_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+        # This is an AXFR-style IXFR.  So the question section should indicate
+        # that it's an IXFR response.
+        self.assertEqual(RRType.IXFR(), response.get_question()[0].get_type())
+        self.check_axfr_stream(response)
+
+    def test_ixfr_normal_session(self):
+        # See testdata/creatediff.py.  There are 8 changes between the two
+        # versions, so the answer section should contain all of them plus
+        # the beginning and trailing SOAs.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=IXFR_OK_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        actual_records = response.get_section(Message.SECTION_ANSWER)
+        expected_records = [create_soa(2011112001), create_soa(2011111802),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
+                            create_soa(2011111900),
+                            create_a(Name(self.ns_name), '192.0.2.53'),
+                            create_aaaa(Name(self.ns_name), '2001:db8::1'),
+                            create_soa(2011112001),
+                            create_a(Name(self.ns_name), '192.0.2.1'),
+                            create_soa(2011112001)]
+        self.assertEqual(len(expected_records), len(actual_records))
+        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_ixfr_soa_only(self):
+        # The requested SOA serial is the latest one.  The response should
+        # contain exactly one SOA of that serial.
+        self.xfrsess._request_data = \
+            self.create_request_data(ixfr=SOA_CURRENT_VERSION)
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        answers = response.get_section(Message.SECTION_ANSWER)
+        self.assertEqual(1, len(answers))
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answers[0]))
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):
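As background for the xfrout.py.in changes that follow: the IXFR requests
built by create_request_data() above consist of nothing more than an IXFR
question plus the client's current SOA in the authority section, where
only the serial field matters.  A minimal standalone sketch using the same
isc.dns calls as the test code (msg.to_wire() is the only call not shown
in this diff and is assumed to render the message into the renderer):

    from isc.dns import *

    # Build an IXFR query for example.com claiming the client is at
    # serial 2011111802 (IXFR_OK_VERSION in the tests above).
    msg = Message(Message.RENDER)
    msg.set_qid(0x1035)
    msg.set_opcode(Opcode.QUERY())
    msg.set_rcode(Rcode.NOERROR())
    msg.add_question(Question(Name('example.com'), RRClass.IN(),
                              RRType.IXFR()))
    soa = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(), RRTTL(0))
    # Only the serial is significant; the other SOA fields are dummies.
    soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), 'm r 2011111802 1 1 1 1'))
    msg.add_rrset(Message.SECTION_AUTHORITY, soa)

    renderer = MessageRenderer()
    msg.to_wire(renderer)
    wire = renderer.get_data()      # comparable to create_request_data()

_parse_query_message() then identifies the request type from the question,
and __ixfr_setup() compares the serial in that SOA against the zone's
current SOA to choose between a pure IXFR, an AXFR-style fallback, or a
single-SOA response.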
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index 32dabbf..310a0aa 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
 import threading
 import struct
 import signal
-from isc.datasrc import DataSourceClient
+from isc.datasrc import DataSourceClient, ZoneFinder, ZoneJournalReader
 from socketserver import *
 import os
 from isc.config.ccsession import *
@@ -39,6 +39,7 @@ from isc.log_messages.xfrout_messages import *
 
 isc.log.init("b10-xfrout")
 logger = isc.log.Logger("xfrout")
+DBG_XFROUT_TRACE = logger.DBGLVL_TRACE_BASIC
 
 try:
     from libutil_io_python import *
@@ -46,7 +47,7 @@ try:
 except ImportError as e:
     # C++ loadable module may not be installed; even so the xfrout process
     # must keep running, so we warn about it and move forward.
-    log.error(XFROUT_IMPORT, str(e))
+    logger.error(XFROUT_IMPORT, str(e))
 
 from isc.acl.acl import ACCEPT, REJECT, DROP, LoaderError
 from isc.acl.dns import REQUEST_LOADER
@@ -65,6 +66,11 @@ class XfroutConfigError(Exception):
     """
     pass
 
+class XfroutSessionError(Exception):
+    '''An exception raised for some unexpected events during an xfrout session.
+    '''
+    pass
+
 def init_paths():
     global SPECFILE_PATH
     global AUTH_SPECFILE_PATH
@@ -92,10 +98,8 @@ init_paths()
 SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
 AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
 VERBOSE_MODE = False
-# tsig sign every N axfr packets.
-TSIG_SIGN_EVERY_NTH = 96
-
-XFROUT_MAX_MESSAGE_SIZE = 65535
+XFROUT_DNS_HEADER_SIZE = 12     # protocol constant
+XFROUT_MAX_MESSAGE_SIZE = 65535 # ditto
 
 # borrowed from xfrin.py @ #1298.  We should eventually unify it.
 def format_zone_str(zone_name, zone_class):
@@ -105,7 +109,7 @@ def format_zone_str(zone_name, zone_class):
        zone_name (isc.dns.Name) name to format
        zone_class (isc.dns.RRClass) class to format
     """
-    return zone_name.to_text() + '/' + str(zone_class)
+    return zone_name.to_text(True) + '/' + str(zone_class)
 
 # borrowed from xfrin.py @ #1298.
 def format_addrinfo(addrinfo):
@@ -135,6 +139,11 @@ def get_rrset_len(rrset):
     rrset.to_wire(bytes)
     return len(bytes)
 
+def get_soa_serial(soa_rdata):
+    '''Extract the serial field of an SOA RDATA and return it as an integer.
+    (borrowed from xfrin)
+    '''
+    return int(soa_rdata.to_text().split()[2])
 
 class XfroutSession():
     def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
@@ -146,11 +155,13 @@ class XfroutSession():
         self._tsig_ctx = None
         self._tsig_len = 0
         self._remote = remote
-        self._request_type = 'AXFR' # could be IXFR when we support it
+        self._request_type = None
+        self._request_typestr = None
         self._acl = default_acl
         self._zone_config = zone_config
         self.ClientClass = client_class # parameterize this for testing
-        self._soa = None # will be set in _check_xfrout_available or in tests
+        self._soa = None # will be set in _xfrout_setup or in tests
+        self._jnl_reader = None # will be set to a reader for IXFR
         self._handle()
 
     def create_tsig_ctx(self, tsig_record, tsig_key_ring):
@@ -198,7 +209,8 @@ class XfroutSession():
         tsig_record = msg.get_tsig_record()
         if tsig_record is not None:
             self._tsig_len = tsig_record.get_length()
-            self._tsig_ctx = self.create_tsig_ctx(tsig_record, self._tsig_key_ring)
+            self._tsig_ctx = self.create_tsig_ctx(tsig_record,
+                                                  self._tsig_key_ring)
             tsig_error = self._tsig_ctx.verify(tsig_record, request_data)
             if tsig_error != TSIGError.NOERROR:
                 return Rcode.NOTAUTH()
@@ -221,26 +233,40 @@ class XfroutSession():
             return rcode, msg
 
         # Make sure the question is valid.  This should be ensured by
-        # the auth server, but since it's far from our xfrout itself,
-        # we check it by ourselves.
+        # the auth server, but since it's far from xfrout itself, we check
+        # it ourselves.  A violation would be an internal bug, so we raise
+        # an exception and stop here rather than return FORMERR or SERVFAIL.
         if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
-            return Rcode.FORMERR(), msg
+            raise RuntimeError('Invalid number of questions for XFR: ' +
+                               str(msg.get_rr_count(Message.SECTION_QUESTION)))
+        question = msg.get_question()[0]
+
+        # Identify the request type
+        self._request_type = question.get_type()
+        if self._request_type == RRType.AXFR():
+            self._request_typestr = 'AXFR'
+        elif self._request_type == RRType.IXFR():
+            self._request_typestr = 'IXFR'
+        else:
+            # Likewise, this should be impossible.
+            raise RuntimeError('Unexpected XFR type: ' +
+                               str(self._request_type))
 
         # ACL checks
-        zone_name = msg.get_question()[0].get_name()
-        zone_class = msg.get_question()[0].get_class()
+        zone_name = question.get_name()
+        zone_class = question.get_class()
         acl = self._get_transfer_acl(zone_name, zone_class)
         acl_result = acl.execute(
             isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
         if acl_result == DROP:
-            logger.info(XFROUT_QUERY_DROPPED, self._request_type,
-                        format_addrinfo(self._remote),
-                        format_zone_str(zone_name, zone_class))
+            logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_DROPPED,
+                         self._request_type, format_addrinfo(self._remote),
+                         format_zone_str(zone_name, zone_class))
             return None, None
         elif acl_result == REJECT:
-            logger.info(XFROUT_QUERY_REJECTED, self._request_type,
-                        format_addrinfo(self._remote),
-                        format_zone_str(zone_name, zone_class))
+            logger.debug(DBG_XFROUT_TRACE, XFROUT_QUERY_REJECTED,
+                         self._request_type, format_addrinfo(self._remote),
+                         format_zone_str(zone_name, zone_class))
             return Rcode.REFUSED(), msg
 
         return rcode, msg
@@ -298,23 +324,33 @@ class XfroutSession():
         msg.set_rcode(rcode_)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-    def _check_xfrout_available(self, zone_name):
-        '''Check if xfr request can be responsed.
-           TODO, Get zone's configuration from cfgmgr or some other place
-           eg. check allow_transfer setting,
+    def _get_zone_soa(self, zone_name):
+        '''Retrieve the SOA RR of the given zone.
+
+        It returns a pair of RCODE and the SOA (in the form of RRset).
+        On success RCODE is NOERROR and returned SOA is not None;
+        on failure RCODE indicates the appropriate code in the context of
+        xfr processing, and the returned SOA is None.
 
         '''
+        result, finder = self._datasrc_client.find_zone(zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return (Rcode.NOTAUTH(), None)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+                                        ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            return (Rcode.SERVFAIL(), None)
+        # Especially for database-based zones, a working zone may be in
+        # a broken state where it has more than one SOA RR.  We proactively
+        # check the condition and abort the xfr attempt if we identify it.
+        if soa_rrset.get_rdata_count() != 1:
+            return (Rcode.SERVFAIL(), None)
+        return (Rcode.NOERROR(), soa_rrset)
+
+    def __axfr_setup(self, zone_name):
+        '''Setup a zone iterator for AXFR or AXFR-style IXFR.
 
-        # Identify the data source for the requested zone and see if it has
-        # SOA while initializing objects used for request processing later.
-        # We should eventually generalize this so that we can choose the
-        # appropriate data source from (possible) multiple candidates.
-        # We should eventually take into account the RR class here.
-        # For now, we  hardcode a particular type (SQLite3-based), and only
-        # consider that one.
-        datasrc_config = '{ "database_file": "' + \
-            self._server.get_db_file() + '"}'
-        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+        '''
         try:
             # Note that we enable 'separate_rrs'.  In xfr-out we need to
             # preserve as many things as possible (even if it's half broken)
@@ -339,6 +375,112 @@ class XfroutSession():
 
         return Rcode.NOERROR()
 
+    def __ixfr_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a zone journal reader for IXFR.
+
+        If the underlying data source does not know the requested range
+        of zone differences it automatically falls back to AXFR-style
+        IXFR by setting up a zone iterator instead of a journal reader.
+
+        '''
+        # Check the authority section.  Look for a SOA record with
+        # the same name and class as the question.
+        remote_soa = None
+        for auth_rrset in request_msg.get_section(Message.SECTION_AUTHORITY):
+            # Ignore data whose owner name is not the zone apex, and
+            # ignore non-SOA or different class of records.
+            if auth_rrset.get_name() != zone_name or \
+                    auth_rrset.get_type() != RRType.SOA() or \
+                    auth_rrset.get_class() != zone_class:
+                continue
+            if auth_rrset.get_rdata_count() != 1:
+                logger.info(XFROUT_IXFR_MULTIPLE_SOA,
+                            format_addrinfo(self._remote))
+                return Rcode.FORMERR()
+            remote_soa = auth_rrset
+        if remote_soa is None:
+            logger.info(XFROUT_IXFR_NO_SOA, format_addrinfo(self._remote))
+            return Rcode.FORMERR()
+
+        # Retrieve the local SOA
+        rcode, self._soa = self._get_zone_soa(zone_name)
+        if rcode != Rcode.NOERROR():
+            return rcode
+
+        # RFC1995 says "If an IXFR query with the same or newer version
+        # number than that of the server is received, it is replied to with
+        # a single SOA record of the server's current version, just as
+        # in AXFR".  The claim about AXFR is incorrect, but other than that,
+        # we do as the RFC says.
+        # Note: until we complete #1278 we can only check equality of the
+        # two serials.  The "newer version" case would fall back to AXFR-style.
+        begin_serial = get_soa_serial(remote_soa.get_rdata()[0])
+        end_serial = get_soa_serial(self._soa.get_rdata()[0])
+        if begin_serial == end_serial:
+            # clear both iterator and jnl_reader to signal we won't do
+            # iteration in response generation
+            self._iterator = None
+            self._jnl_reader = None
+            logger.info(XFROUT_IXFR_UPTODATE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return Rcode.NOERROR()
+
+        # Set up the journal reader or fall back to AXFR-style IXFR
+        try:
+            code, self._jnl_reader = self._datasrc_client.get_journal_reader(
+                zone_name, begin_serial, end_serial)
+        except isc.datasrc.NotImplemented as ex:
+            # The underlying data source doesn't support journaling.
+            # Fall back to AXFR-style IXFR.
+            logger.info(XFROUT_IXFR_NO_JOURNAL_SUPPORT,
+                        format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_VERSION:
+            logger.info(XFROUT_IXFR_NO_VERSION, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class),
+                        begin_serial, end_serial)
+            return self.__axfr_setup(zone_name)
+        if code == ZoneJournalReader.NO_SUCH_ZONE:
+            # this is quite unexpected as we know the zone's SOA exists.
+            # It might be a bug or the data source is somehow broken,
+            # but it can still happen if someone has removed the zone
+            # between these two operations.  We treat it as NOTAUTH.
+            logger.warn(XFROUT_IXFR_NO_ZONE, format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return Rcode.NOTAUTH()
+
+        # Use the reader as the iterator to generate the response.
+        self._iterator = self._jnl_reader
+
+        return Rcode.NOERROR()
+
+    def _xfrout_setup(self, request_msg, zone_name, zone_class):
+        '''Set up a context for xfr responses according to the request type.
+
+        This method identifies the most appropriate data source for the
+        request and sets up a zone iterator or journal reader depending on
+        whether the request is AXFR or IXFR.  If it identifies any
+        protocol-level error it returns an RCODE other than NOERROR.
+
+        '''
+
+        # Identify the data source for the requested zone and see if it has
+        # SOA while initializing objects used for request processing later.
+        # We should eventually generalize this so that we can choose the
+        # appropriate data source from (possibly) multiple candidates.
+        # We should eventually take into account the RR class here.
+        # For now, we hardcode a particular type (SQLite3-based), and only
+        # consider that one.
+        datasrc_config = '{ "database_file": "' + \
+            self._server.get_db_file() + '"}'
+        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+
+        if self._request_type == RRType.AXFR():
+            return self.__axfr_setup(zone_name)
+        else:
+            return self.__ixfr_setup(request_msg, zone_name, zone_class)
 
     def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
         rcode_, msg = self._parse_query_message(msg_query)
@@ -351,7 +493,7 @@ class XfroutSession():
             return self._reply_query_with_error_rcode(msg, sock_fd,
                                                       Rcode.FORMERR())
         elif not quota_ok:
-            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_type,
+            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_typestr,
                         format_addrinfo(self._remote),
                         self._server._max_transfers_out)
             return self._reply_query_with_error_rcode(msg, sock_fd,
@@ -362,27 +504,25 @@ class XfroutSession():
         zone_class = question.get_class()
         zone_str = format_zone_str(zone_name, zone_class) # for logging
 
-        # TODO: we should also include class in the check
         try:
-            rcode_ = self._check_xfrout_available(zone_name)
+            rcode_ = self._xfrout_setup(msg, zone_name, zone_class)
         except Exception as ex:
-            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_type,
+            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_typestr,
                          format_addrinfo(self._remote), zone_str, ex)
             rcode_ = Rcode.SERVFAIL()
         if rcode_ != Rcode.NOERROR():
-            logger.info(XFROUT_AXFR_TRANSFER_FAILED, self._request_type,
+            logger.info(XFROUT_XFR_TRANSFER_FAILED, self._request_typestr,
                         format_addrinfo(self._remote), zone_str, rcode_)
             return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            logger.info(XFROUT_AXFR_TRANSFER_STARTED, self._request_type,
+            logger.info(XFROUT_XFR_TRANSFER_STARTED, self._request_typestr,
                         format_addrinfo(self._remote), zone_str)
             self._reply_xfrout_query(msg, sock_fd)
         except Exception as err:
-            logger.error(XFROUT_AXFR_TRANSFER_ERROR, self._request_type,
+            logger.error(XFROUT_XFR_TRANSFER_ERROR, self._request_typestr,
                     format_addrinfo(self._remote), zone_str, err)
-            pass
-        logger.info(XFROUT_AXFR_TRANSFER_DONE, self._request_type,
+        logger.info(XFROUT_XFR_TRANSFER_DONE, self._request_typestr,
                     format_addrinfo(self._remote), zone_str)
 
     def _clear_message(self, msg):
@@ -398,76 +538,93 @@ class XfroutSession():
         msg.set_header_flag(Message.HEADERFLAG_QR)
         return msg
 
-    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
-                                    count_since_last_tsig_sign):
-        '''Add the SOA record to the end of message. If it can't be
-        added, a new message should be created to send out the last soa .
-        '''
-        rrset_len = get_rrset_len(rrset_soa)
+    def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
+                                    message_upper_len):
+        '''Add the SOA record to the end of the message.
 
-        if (count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH and
-            message_upper_len + rrset_len >= XFROUT_MAX_MESSAGE_SIZE):
-            # If tsig context exist, sign the packet with serial number TSIG_SIGN_EVERY_NTH
+        If it would exceed the maximum allowable size of a message, a new
+        message will be created to send out the last SOA.
+
+        We assume a message with a single SOA can always fit the buffer
+        with or without TSIG.  In theory this could be wrong if TSIG is
+        stupidly large, but in practice this assumption should be reasonable.
+        '''
+        if message_upper_len + get_rrset_len(rrset_soa) > \
+                XFROUT_MAX_MESSAGE_SIZE:
             self._send_message(sock_fd, msg, self._tsig_ctx)
             msg = self._clear_message(msg)
-        elif (count_since_last_tsig_sign != TSIG_SIGN_EVERY_NTH and
-              message_upper_len + rrset_len + self._tsig_len >= XFROUT_MAX_MESSAGE_SIZE):
-            self._send_message(sock_fd, msg)
-            msg = self._clear_message(msg)
 
-        # If tsig context exist, sign the last packet
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-
     def _reply_xfrout_query(self, msg, sock_fd):
-        #TODO, there should be a better way to insert rrset.
-        count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
+        # Reserve space for the fixed header size, the size of the question
+        # section, and the TSIG size (when included).  The size of the
+        # question section is the sum of the qname length and the size of
+        # the fixed-length fields (type and class, 2 bytes each).
+        message_upper_len = XFROUT_DNS_HEADER_SIZE + \
+            msg.get_question()[0].get_name().get_length() + 4 + \
+            self._tsig_len
+
+        # If the iterator is None, we are responding to IXFR with a single
+        # SOA RR.
+        if self._iterator is None:
+            self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                             message_upper_len)
+            return
 
-        message_upper_len = get_rrset_len(self._soa) + self._tsig_len
+        # Add the beginning SOA
+        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
+        message_upper_len += get_rrset_len(self._soa)
 
+        # Add the rest of the zone/diff contents
         for rrset in self._iterator:
             # Check if xfrout is shutdown
             if  self._server._shutdown_event.is_set():
                 logger.info(XFROUT_STOPPING)
                 return
 
-            if rrset.get_type() == RRType.SOA():
+            # For AXFR (or AXFR-style IXFR), where _jnl_reader is None,
+            # we should skip SOAs from the iterator.
+            if self._jnl_reader is None and rrset.get_type() == RRType.SOA():
                 continue
 
             # We calculate the maximum size of the RRset (i.e. the
             # size without compression) and use that to see if we
             # may have reached the limit
             rrset_len = get_rrset_len(rrset)
-            if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
+
+            if message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE:
                 msg.add_rrset(Message.SECTION_ANSWER, rrset)
                 message_upper_len += rrset_len
                 continue
 
-            # If tsig context exist, sign every N packets
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                count_since_last_tsig_sign = 0
-                self._send_message(sock_fd, msg, self._tsig_ctx)
-            else:
-                self._send_message(sock_fd, msg)
+            # RR would not fit.  If there are other RRs in the buffer, send
+            # them now and leave this RR to the next message.
+            self._send_message(sock_fd, msg, self._tsig_ctx)
 
-            count_since_last_tsig_sign += 1
+            # Create a new message and reserve space for the carried-over
+            # RR (and TSIG space in case it's to be TSIG signed)
             msg = self._clear_message(msg)
+            message_upper_len = XFROUT_DNS_HEADER_SIZE + rrset_len + \
+                self._tsig_len
+
+            # If this RR overflows the buffer all by itself, fail.  In theory
+            # some RRs might fit in a TCP message when compressed even if they
+            # do not fit when uncompressed, but surely we don't want to send
+            # such monstrosities to an unsuspecting slave.
+            if message_upper_len > XFROUT_MAX_MESSAGE_SIZE:
+                raise XfroutSessionError('RR too large for zone transfer (' +
+                                         str(rrset_len) + ' bytes)')
+
             # Add the RRset to the new message
             msg.add_rrset(Message.SECTION_ANSWER, rrset)
 
-            # Reserve tsig space for signed packet
-            if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
-                message_upper_len = rrset_len + self._tsig_len
-            else:
-                message_upper_len = rrset_len
-
+        # Add and send the trailing SOA
         self._send_message_with_last_soa(msg, sock_fd, self._soa,
-                                         message_upper_len,
-                                         count_since_last_tsig_sign)
+                                         message_upper_len)
 
 class UnixSockServer(socketserver_mixin.NoPollMixIn,
                      ThreadingUnixStreamServer):
@@ -541,7 +698,7 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             try:
                 self.process_request(request)
             except Exception as pre:
-                log.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
+                logger.error(XFROUT_PROCESS_REQUEST_ERROR, str(pre))
                 break
 
     def _handle_request_noblock(self):
@@ -656,7 +813,6 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             os.unlink(self._sock_file)
         except Exception as e:
             logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
-            pass
 
     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module.
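
For reference, the IXFR setup decision implemented above by __ixfr_setup()
boils down to the following simplified sketch (using the isc.datasrc Python
bindings as in xfrout.py.in; the function and variable names are placeholders,
and logging, the FORMERR checks and the actual response generation are
omitted):

    import isc.datasrc
    from isc.datasrc import ZoneJournalReader

    def ixfr_setup_sketch(client, zone_name, begin_serial, end_serial):
        # Client already has the current version: answer with a single SOA.
        if begin_serial == end_serial:
            return 'single-SOA response'
        try:
            code, reader = client.get_journal_reader(zone_name,
                                                     begin_serial, end_serial)
        except isc.datasrc.NotImplemented:
            return 'AXFR-style IXFR'    # data source has no journal
        if code == ZoneJournalReader.NO_SUCH_VERSION:
            return 'AXFR-style IXFR'    # requested diff range not retained
        if code == ZoneJournalReader.NO_SUCH_ZONE:
            return 'NOTAUTH'            # zone vanished between the two checks
        return reader                   # stream the stored diff sequences
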
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index 894ade5..fcc2e59 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -15,37 +15,6 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrout messages python module.
 
-% XFROUT_AXFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
-The transfer of the given zone has been completed successfully, or was
-aborted due to a shutdown event.
-
-% XFROUT_AXFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
-An uncaught exception was encountered while sending the response to
-an AXFR query. The error message of the exception is included in the
-log message, but this error most likely points to incomplete exception
-handling in the code.
-
-% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
-Pre-response check for an incomding XFR request failed unexpectedly.
-The most likely cause of this is that some low level error in the data
-source, but it may also be other general (more unlikely) errors such
-as memory shortage.  Some detail of the error is also included in the
-message.  The xfrout server tries to return a SERVFAIL response in this case.
-
-% XFROUT_AXFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
-A transfer out for the given zone failed. An error response is sent
-to the client. The given rcode is the rcode that is set in the error
-response. This is either NOTAUTH (we are not authoritative for the
-zone), SERVFAIL (our internal database is missing the SOA record for
-the zone), or REFUSED (the limit of simultaneous outgoing AXFR
-transfers, as specified by the configuration value
-Xfrout/max_transfers_out, has been reached).
-# Still a TODO, but when implemented, REFUSED can also mean
-# the client is not allowed to transfer the zone
-
-% XFROUT_AXFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
-A transfer out of the given zone has started.
-
 % XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
 The TSIG key string as read from the configuration does not represent
 a valid TSIG key.
@@ -178,3 +147,72 @@ on, but the file is in use. The most likely cause is that another
 xfrout daemon process is still running. This xfrout daemon (the one
 printing this message) will not start.
 
+% XFROUT_XFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
+The transfer of the given zone has been completed successfully, or was
+aborted due to a shutdown event.
+
+% XFROUT_XFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
+An uncaught exception was encountered while sending the response to
+an XFR query. The error message of the exception is included in the
+log message, but this error most likely points to incomplete exception
+handling in the code.
+
+% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is some low-level error in the data
+source, but it may also be another, less likely, general error such
+as memory shortage.  Some detail of the error is also included in the
+message.  The xfrout server tries to return a SERVFAIL response in this case.
+
+% XFROUT_XFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
+A transfer out for the given zone failed. An error response is sent
+to the client. The given rcode is the rcode that is set in the error
+response. This is either NOTAUTH (we are not authoritative for the
+zone), SERVFAIL (our internal database is missing the SOA record for
+the zone), or REFUSED (the limit of simultaneous outgoing AXFR
+transfers, as specified by the configuration value
+Xfrout/max_transfers_out, has been reached).
+# Still a TODO, but when implemented, REFUSED can also mean
+# the client is not allowed to transfer the zone
+
+% XFROUT_XFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
+A transfer out of the given zone has started.
+
+% XFROUT_IXFR_MULTIPLE_SOA IXFR client %1: authority section has multiple SOAs
+An IXFR request was received with more than one SOA RR in the authority
+section.  The xfrout daemon rejects the request with an RCODE of
+FORMERR.
+
+% XFROUT_IXFR_NO_SOA IXFR client %1: missing SOA
+An IXFR request was received with no SOA RR in the authority section.
+The xfrout daemon rejects the request with an RCODE of FORMERR.
+
+% XFROUT_IXFR_NO_JOURNAL_SUPPORT IXFR client %1, %2: journaling not supported in the data source, falling back to AXFR
+An IXFR request was received but the underlying data source did
+not support journaling.  The xfrout daemon fell back to AXFR-style
+IXFR.
+
+% XFROUT_IXFR_UPTODATE IXFR client %1, %2: client version is new enough (theirs=%3, ours=%4)
+An IXFR request was received, but the client's SOA version is the same as
+or newer than that of the server.  The xfrout server responds to the
+request with an answer section containing just one SOA RR of the server's
+current version.
+Note: as of this writing the 'newer version' case cannot be identified due
+to the lack of support for serial number arithmetic.  This will soon
+be implemented.
+
+% XFROUT_IXFR_NO_VERSION IXFR client %1, %2: version (%3 to %4) not in journal, falling back to AXFR
+An IXFR request was received, but the requested range of differences
+was not found in the data source.  The xfrout daemon fell back to
+AXFR-style IXFR.
+
+% XFROUT_IXFR_NO_ZONE IXFR client %1, %2: zone not found with journal
+The requested zone in IXFR was not found in the data source
+even though the xfrout daemon successfully found the SOA RR of the zone
+in the data source.  This can happen if the administrator removed the
+zone from the data source in the narrow window between these two
+operations, but it's more likely to be a bug or a broken data source.
+Unless you know why this message was logged, and especially if it
+happens often, it's advisable to check whether the data source is
+valid for this zone.  The xfrout daemon considers it a possible,
+though unlikely, event, and returns a response with an RCODE of
+NOTAUTH.
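
The 'newer version' case mentioned in XFROUT_IXFR_UPTODATE requires serial
number arithmetic (RFC 1982), which is not yet hooked in.  As a rough
illustration of the comparison that will eventually be needed (a sketch only,
not part of this change; the function name is made up):

    def serial_gt(s1, s2):
        """Return True if 32-bit SOA serial s1 is 'greater than' s2 per
        RFC 1982 serial number arithmetic.  The case where the serials
        differ by exactly 2**31 is undefined and treated as False here."""
        return s1 != s2 and ((s1 - s2) % 2**32) < 2**31

    # e.g. serial_gt(1, 4294967295) is True: serial 1 is newer after the
    # serial space wraps around.
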
diff --git a/src/lib/asiodns/io_fetch.cc b/src/lib/asiodns/io_fetch.cc
index 466be3e..8a91982 100644
--- a/src/lib/asiodns/io_fetch.cc
+++ b/src/lib/asiodns/io_fetch.cc
@@ -355,10 +355,6 @@ IOFetch::stop(Result result) {
         // variable should be done inside a mutex (and the stopped_ variable
         // declared as "volatile").
         //
-        // The numeric arguments indicate the debug level, with the lower
-        // numbers indicating the most important information.  The relative
-        // values are somewhat arbitrary.
-        //
         // TODO: Update testing of stopped_ if threads are used.
         data_->stopped = true;
         switch (result) {
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 5444547..07c3e13 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -14,6 +14,9 @@ CLEANFILES = *.gcno *.gcda
 # with -Werror (our default setting).
 
 lib_LTLIBRARIES = libasiolink.la
+
+libasiolink_la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libasiolink_la_SOURCES  = asiolink.h
 libasiolink_la_SOURCES += dummy_io_cb.h
 libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
diff --git a/src/lib/cryptolink/Makefile.am b/src/lib/cryptolink/Makefile.am
index 93f3443..fc12fae 100644
--- a/src/lib/cryptolink/Makefile.am
+++ b/src/lib/cryptolink/Makefile.am
@@ -11,4 +11,5 @@ lib_LTLIBRARIES = libcryptolink.la
 libcryptolink_la_SOURCES = cryptolink.h cryptolink.cc
 libcryptolink_la_SOURCES += crypto_hmac.h crypto_hmac.cc
 
-libcryptolink_la_LIBADD = ${BOTAN_LDFLAGS} ${BOTAN_RPATH}
+libcryptolink_la_LDFLAGS = ${BOTAN_LDFLAGS}
+libcryptolink_la_LIBADD = ${BOTAN_LIBS} ${BOTAN_RPATH}
diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am
index fbdd13f..6ac6fdf 100644
--- a/src/lib/cryptolink/tests/Makefile.am
+++ b/src/lib/cryptolink/tests/Makefile.am
@@ -16,8 +16,8 @@ TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
 run_unittests_SOURCES += crypto_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS =  $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS) 
+run_unittests_LDADD = $(GTEST_LDADD) $(BOTAN_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index bf1171e..b6c314c 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,9 +7,15 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
+pkglibexecdir = $(libexecdir)/@PACKAGE@/backends
+
+datasrc_config.h: datasrc_config.h.pre
+	$(SED) -e "s|@@PKGLIBEXECDIR@@|$(pkglibexecdir)|" datasrc_config.h.pre >$@
+
 CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
+CLEANFILES += datasrc_config.h
 
-lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
+lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
 libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
 libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -25,8 +31,11 @@ libdatasrc_la_SOURCES += database.h database.cc
 libdatasrc_la_SOURCES += factory.h factory.cc
 nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
+pkglibexec_LTLIBRARIES =  sqlite3_ds.la memory_ds.la
+
 sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
 sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LDFLAGS += -no-undefined -version-info 1:0:0
 sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 sqlite3_ds_la_LIBADD += libdatasrc.la
 sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
@@ -42,7 +51,7 @@ libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 libdatasrc_la_LIBADD += $(SQLITE_LIBS)
 
-BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
 datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
 	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 18d04f7..24c8850 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -15,6 +15,8 @@
 #ifndef __DATA_SOURCE_CLIENT_H
 #define __DATA_SOURCE_CLIENT_H 1
 
+#include <utility>
+
 #include <boost/noncopyable.hpp>
 #include <boost/shared_ptr.hpp>
 
@@ -311,6 +313,55 @@ public:
     virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
                                       bool replace, bool journaling = false)
         const = 0;
+
+    /// Return a journal reader to retrieve differences of a zone.
+    ///
+    /// A derived version of this method creates a concrete
+    /// \c ZoneJournalReader object specific to the underlying data source
+    /// for the specified name of zone and differences between the versions
+    /// specified by the beginning and ending serials of the corresponding
+    /// SOA RRs.
+    /// The RR class of the zone is the one that the client is expected to
+    /// handle (see the detailed description of this class).
+    ///
+    /// Note that the SOA serials are compared by the semantics of the serial
+    /// number arithmetic.  So, for example, \c begin_serial can be larger than
+    /// \c end_serial as bare unsigned integers.  The underlying data source
+    /// implementation is assumed to keep track of sufficient history to
+    /// identify (if they exist) the corresponding differences between the
+    /// specified versions.
+    ///
+    /// This method returns the result as a pair of a result code and
+    /// a pointer to a \c ZoneJournalReader object.  On success, the result
+    /// code is \c SUCCESS and the pointer must be non-NULL; otherwise
+    /// the result code is something other than \c SUCCESS and the pointer
+    /// must be NULL.
+    ///
+    /// If the specified zone is not found in the data source, the result
+    /// code is \c NO_SUCH_ZONE.
+    /// Otherwise, if the specified range of differences for the zone is not
+    /// found
+    /// in the data source, the result code is \c NO_SUCH_VERSION.
+    ///
+    /// Handling differences is an optional feature of a data source.
+    /// If the underlying data source does not support difference handling,
+    /// this method for that type of data source can throw an exception of
+    /// class \c NotImplemented.
+    ///
+    /// \exception NotImplemented The data source does not support differences.
+    /// \exception DataSourceError Other operational errors at the data source
+    /// level.
+    ///
+    /// \param zone The name of the zone for which the difference should be
+    /// retrieved.
+    /// \param begin_serial The SOA serial of the beginning version of the
+    /// differences.
+    /// \param end_serial The SOA serial of the ending version of the
+    /// differences.
+    ///
+    /// \return A pair of result code and a pointer to \c ZoneJournalReader.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const = 0;
 };
 }
 }
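
As a usage sketch of this new interface (shown via the Python bindings that
xfrout.py.in uses above; the data source configuration, zone name and serials
below are example values only):

    from isc.datasrc import DataSourceClient, ZoneJournalReader
    from isc.dns import Name

    client = DataSourceClient('sqlite3',
                              '{ "database_file": "/tmp/example.sqlite3" }')
    code, reader = client.get_journal_reader(Name('example.com'), 1230, 1234)
    if code == ZoneJournalReader.SUCCESS:
        # The reader yields the stored difference sequences in order:
        # for each sequence, the deleted RRs (starting with the old SOA)
        # followed by the added RRs (starting with the new SOA).
        for rrset in reader:
            print(rrset.to_text())
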
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 187c376..5de303b 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <string>
+#include <utility>
 #include <vector>
 
 #include <datasrc/database.h>
@@ -1011,7 +1012,7 @@ public:
         committed_(false), accessor_(accessor), zone_id_(zone_id),
         db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
         zone_class_(zone_class), journaling_(journaling),
-        diff_phase_(NOT_STARTED),
+        diff_phase_(NOT_STARTED), serial_(0),
         finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
     {
         logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -1064,7 +1065,7 @@ private:
         ADD
     };
     DiffPhase diff_phase_;
-    uint32_t serial_;
+    Serial serial_;
     boost::scoped_ptr<DatabaseClient::Finder> finder_;
 
     // This is a set of validation checks commonly used for addRRset() and
@@ -1153,8 +1154,8 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
         columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_ADD,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_ADD, journal);
         }
         accessor_->addRecordToZone(columns);
     }
@@ -1191,8 +1192,8 @@ DatabaseUpdater::deleteRRset(const RRset& rrset) {
         params[Accessor::DEL_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_DELETE,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_DELETE, journal);
         }
         accessor_->deleteRecordInZone(params);
     }
@@ -1238,5 +1239,105 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
     return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
                                                name, rrclass_, journaling)));
 }
-}   // namespace datasrc
-}   // namespace isc
+
+//
+// Zone journal reader using some database system as the underlying data
+//  source.
+//
+class DatabaseJournalReader : public ZoneJournalReader {
+private:
+    // A shortcut typedef to keep the code concise.
+    typedef DatabaseAccessor Accessor;
+public:
+    DatabaseJournalReader(shared_ptr<Accessor> accessor, const Name& zone,
+                          int zone_id, const RRClass& rrclass, uint32_t begin,
+                          uint32_t end) :
+        accessor_(accessor), zone_(zone), rrclass_(rrclass),
+        begin_(begin), end_(end), finished_(false)
+    {
+        context_ = accessor_->getDiffs(zone_id, begin, end);
+    }
+    virtual ~DatabaseJournalReader() {}
+    virtual ConstRRsetPtr getNextDiff() {
+        if (finished_) {
+            isc_throw(InvalidOperation,
+                      "Diff read attempt past the end of sequence on "
+                      << accessor_->getDBName());
+        }
+
+        string data[Accessor::COLUMN_COUNT];
+        if (!context_->getNext(data)) {
+            finished_ = true;
+            LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                      DATASRC_DATABASE_JOURNALREADER_END).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_);
+            return (ConstRRsetPtr());
+        }
+
+        try {
+            RRsetPtr rrset(new RRset(Name(data[Accessor::NAME_COLUMN]),
+                                     rrclass_,
+                                     RRType(data[Accessor::TYPE_COLUMN]),
+                                     RRTTL(data[Accessor::TTL_COLUMN])));
+            rrset->addRdata(rdata::createRdata(rrset->getType(), rrclass_,
+                                               data[Accessor::RDATA_COLUMN]));
+            LOG_DEBUG(logger, DBG_TRACE_DETAILED,
+                      DATASRC_DATABASE_JOURNALREADER_NEXT).
+                arg(rrset->getName()).arg(rrset->getType()).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName());
+            return (rrset);
+        } catch (const Exception& ex) {
+            LOG_ERROR(logger, DATASRC_DATABASE_JOURNALREADR_BADDATA).
+                arg(zone_).arg(rrclass_).arg(accessor_->getDBName()).
+                arg(begin_).arg(end_).arg(ex.what());
+            isc_throw(DataSourceError, "Failed to create RRset from diff on "
+                      << accessor_->getDBName());
+        }
+    }
+
+private:
+    shared_ptr<Accessor> accessor_;
+    const Name zone_;
+    const RRClass rrclass_;
+    Accessor::IteratorContextPtr context_;
+    const uint32_t begin_;
+    const uint32_t end_;
+    bool finished_;
+};
+
+// The JournalReader factory
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+DatabaseClient::getJournalReader(const isc::dns::Name& zone,
+                                 uint32_t begin_serial,
+                                 uint32_t end_serial) const
+{
+    shared_ptr<DatabaseAccessor> jnl_accessor(accessor_->clone());
+    const pair<bool, int> zoneinfo(jnl_accessor->getZone(zone.toText()));
+    if (!zoneinfo.first) {
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_ZONE,
+                    ZoneJournalReaderPtr()));
+    }
+
+    try {
+        const pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> ret(
+            ZoneJournalReader::SUCCESS,
+            ZoneJournalReaderPtr(new DatabaseJournalReader(jnl_accessor,
+                                                           zone,
+                                                           zoneinfo.second,
+                                                           rrclass_,
+                                                           begin_serial,
+                                                           end_serial)));
+        LOG_DEBUG(logger, DBG_TRACE_BASIC,
+                  DATASRC_DATABASE_JOURNALREADER_START).arg(zone).arg(rrclass_).
+            arg(jnl_accessor->getDBName()).arg(begin_serial).arg(end_serial);
+        return (ret);
+    } catch (const NoSuchSerial&) {
+        return (pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>(
+                    ZoneJournalReader::NO_SUCH_VERSION,
+                    ZoneJournalReaderPtr()));
+    }
+}
+}
+}
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index 0027415..7b94735 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -24,6 +24,8 @@
 #include <dns/rrset.h>
 #include <dns/rrtype.h>
 
+#include <datasrc/data_source.h>
+#include <datasrc/client.h>
 #include <datasrc/client.h>
 #include <datasrc/logger.h>
 
@@ -517,12 +519,10 @@ public:
     /// is not for the SOA RR; it passes TTL for a diff that deletes an RR
     /// while in \c deleteRecordInZone() it's omitted.  This is because
     /// the stored diffs are expected to be retrieved in the form that
-    /// \c getRecordDiffs() is expected to meet.  This means if the caller
+    /// \c getDiffs() is expected to meet.  This means if the caller
     /// wants to use this method with other update operations, it must
     /// ensure the additional information is ready when this method is called.
     ///
-    /// \note \c getRecordDiffs() is not yet implemented.
-    ///
     /// The caller of this method must ensure that the added diffs via
     /// this method in a single transaction form an IXFR-style difference
     /// sequences: Each difference sequence is a sequence of RRs:
@@ -535,7 +535,7 @@ public:
     /// an SOA RR, \c serial must be identical to the serial of that SOA).
     /// The underlying derived class implementation may or may not check
     /// this condition, but if the caller doesn't meet the condition
-    /// a subsequent call to \c getRecordDiffs() will not work as expected.
+    /// a subsequent call to \c getDiffs() will not work as expected.
     ///
     /// Any call to this method must be in a transaction, and, for now,
     /// it must be a transaction triggered by \c startUpdateZone() (that is,
@@ -1092,6 +1092,15 @@ public:
                                       bool replace,
                                       bool journaling = false) const;
 
+
+    /// This implementation internally clones the accessor from the one
+    /// used in the client for retrieving diffs and iterating over them.
+    /// The returned reader object will be able to work separately from
+    /// the original client.
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
+
 private:
     /// \brief The RR class that this client handles.
     const isc::dns::RRClass rrclass_;
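
To make the journaling convention concrete: the diffs stored via
addRecordDiff() (and returned by getDiffs()/the journal reader) are expected
to form IXFR-style difference sequences.  For an update from serial 1234 to
1235, a single sequence would look roughly like the following (the owner
names, RR type and RDATA here are made-up example values, shown as a Python
list purely for illustration):

    # (operation, owner name, RR type, RDATA) for one difference sequence:
    # deletions first, starting with the old SOA, then additions, starting
    # with the new SOA.
    diff_sequence = [
        ('DELETE', 'example.org.', 'SOA',
         'ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200'),
        ('DELETE', 'www.example.org.', 'A', '192.0.2.1'),
        ('ADD', 'example.org.', 'SOA',
         'ns1.example.org. admin.example.org. 1235 3600 1800 2419200 7200'),
        ('ADD', 'www.example.org.', 'A', '192.0.2.1'),
    ]
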
diff --git a/src/lib/datasrc/datasrc_config.h.pre.in b/src/lib/datasrc/datasrc_config.h.pre.in
new file mode 100644
index 0000000..ff99601
--- /dev/null
+++ b/src/lib/datasrc/datasrc_config.h.pre.in
@@ -0,0 +1,31 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_CONFIG_H
+#define __DATASRC_CONFIG_H 1
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Default directory to find the loadable data source libraries
+///
+/// This is the directory where, once installed, loadable backend libraries
+/// such as memory_ds.so and sqlite3_ds.so are found. It is used by the
+/// DataSourceClient loader if no absolute path is used and
+/// B10_FROM_BUILD is not set in the environment.
+const char* const BACKEND_LIBRARY_PATH = "@@PKGLIBEXECDIR@@/";
+
+} // end namespace datasrc
+} // end namespace isc
+
+#endif // __DATASRC_CONFIG_H
diff --git a/src/lib/datasrc/datasrc_messages.mes b/src/lib/datasrc/datasrc_messages.mes
index f46efce..01fb082 100644
--- a/src/lib/datasrc/datasrc_messages.mes
+++ b/src/lib/datasrc/datasrc_messages.mes
@@ -662,3 +662,66 @@ data source.
 % DATASRC_UNEXPECTED_QUERY_STATE unexpected query state
 This indicates a programming error. An internal task of unknown type was
 generated.
+
+% DATASRC_DATABASE_UPDATER_CREATED zone updater created for '%1/%2' on %3
+Debug information.  A zone updater object is created to make updates to
+the shown zone on the shown backend database.
+
+% DATASRC_DATABASE_UPDATER_DESTROYED zone updater destroyed for '%1/%2' on %3
+Debug information.  A zone updater object is destroyed after either a
+successful or a failed attempt to make updates to the shown zone on the
+shown backend database.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACK zone updates rolled back for '%1/%2' on %3
+A zone updater is being destroyed without committing the changes.
+This would typically mean the update attempt was aborted due to some
+error, but it may also be a bug in the application that forgot to commit
+the changes.  The intermediate changes made through the updater won't
+be applied to the underlying database.  The zone name, its class, and
+the underlying database name are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_ROLLBACKFAIL failed to roll back zone updates for '%1/%2' on %3: %4
+A zone updater is being destroyed without committing the changes to
+the database, and attempts to roll back incomplete updates, but it
+unexpectedly fails.  The higher level implementation does not expect
+it to fail, so this means either a serious operational error in the
+underlying data source (such as a system failure of a database) or a
+software bug in the underlying data source implementation.  In either
+case, if this message is logged, the administrator should carefully
+examine the underlying data source to see what exactly happened and
+whether the data is still valid.  The zone name, its class, and the
+underlying database name, as well as the error message thrown from the
+database module, are shown in the log message.
+
+% DATASRC_DATABASE_UPDATER_COMMIT updates committed for '%1/%2' on %3
+Debug information.  A set of updates to a zone has been successfully
+committed to the corresponding database backend.  The zone name,
+its class and the database name are printed.
+
+% DATASRC_DATABASE_JOURNALREADER_START %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program starts reading
+a zone's difference sequences from a database-based data source.  The
+zone's name and class, database name, and the start and end serials
+are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_NEXT %1/%2 in %3/%4 on %5
+This is a debug message indicating that the program retrieves one
+difference in difference sequences of a zone and successfully converts
+it to an RRset.  The zone's name and class, database name, and the
+name and RR type of the retrieved diff are shown in the message.
+
+% DATASRC_DATABASE_JOURNALREADER_END %1/%2 on %3 from %4 to %5
+This is a debug message indicating that the program (successfully)
+reaches the end of sequences of a zone's differences.  The zone's name
+and class, database name, and the start and end serials are shown in
+the message.
+
+% DATASRC_DATABASE_JOURNALREADR_BADDATA failed to convert a diff to RRset in %1/%2 on %3 between %4 and %5: %6
+This is an error message indicating that a zone's diff is broken and
+the data source library failed to convert it to a valid RRset.  The
+most likely cause of this is that someone has manually modified the
+zone's diff in the database and inserted invalid data as a result.
+The zone's name and class, database name, and the start and end
+serials, and an additional detail of the error are shown in the
+message.  The administrator should examine the diff in the database
+to find any invalid data and fix it.
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
index 1818c70..35a79fe 100644
--- a/src/lib/datasrc/factory.cc
+++ b/src/lib/datasrc/factory.cc
@@ -19,13 +19,59 @@
 #include "sqlite3_accessor.h"
 #include "memory_datasrc.h"
 
+#include "datasrc_config.h"
+
 #include <datasrc/logger.h>
 
 #include <dlfcn.h>
+#include <cstdlib>
 
+using namespace std;
 using namespace isc::data;
 using namespace isc::datasrc;
 
+namespace {
+// This helper function takes the 'type' string as passed to
+// the DataSourceClient container below, and, unless it
+// already specifies a specific loadable .so file, will
+// convert the short name to the full file name.
+// I.e. it will add '_ds.so' (if necessary), and prepend
+// it with an absolute path (if necessary).
+// Returns the resulting string to use with LibraryContainer.
+const std::string
+getDataSourceLibFile(const std::string& type) {
+    if (type.empty()) {
+        isc_throw(DataSourceLibraryError,
+                  "DataSourceClient container called with empty type value");
+    }
+    if (type == ".so") {
+        isc_throw(DataSourceLibraryError, "DataSourceClient container called "
+                                          "with bad type or file name");
+    }
+
+    // Type can be either a short name, in which case we need to
+    // append "_ds.so", or it can be a direct .so library.
+    std::string lib_file = type;
+    const int ext_pos = lib_file.rfind(".so");
+    if (ext_pos == std::string::npos || ext_pos + 3 != lib_file.length()) {
+        lib_file.append("_ds.so");
+    }
+    // And if it is not an absolute path, prepend it with our
+    // loadable backend library path
+    if (type[0] != '/') {
+        // When running from the build tree, we do NOT want
+        // to load the installed loadable library
+        if (getenv("B10_FROM_BUILD") != NULL) {
+            lib_file = std::string(getenv("B10_FROM_BUILD")) +
+                       "/src/lib/datasrc/.libs/" + lib_file;
+        } else {
+            lib_file = isc::datasrc::BACKEND_LIBRARY_PATH + lib_file;
+        }
+    }
+    return (lib_file);
+}
+} // end anonymous namespace
+
 namespace isc {
 namespace datasrc {
 
@@ -34,7 +80,10 @@ LibraryContainer::LibraryContainer(const std::string& name) {
     // are recognized as such
     ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
     if (ds_lib_ == NULL) {
-        isc_throw(DataSourceLibraryError, dlerror());
+        // This may cause the filename to appear twice in the actual
+        // error, but the output of dlerror is implementation-dependent
+        isc_throw(DataSourceLibraryError, "dlopen failed for " << name << 
+                                          ": " << dlerror());
     }
 }
 
@@ -61,7 +110,7 @@ LibraryContainer::getSym(const char* name) {
 
 DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
                                                      ConstElementPtr config)
-: ds_lib_(type + "_ds.so")
+: ds_lib_(getDataSourceLibFile(type))
 {
     // We are casting from a data to a function pointer here
     // Some compilers (rightfully) complain about that, but
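
In other words, the library file name resolution added here follows these
rules (an illustrative Python restatement of getDataSourceLibFile() above,
not code from the commit; the installed path shown is an example, the real
value comes from datasrc_config.h at build time):

    import os

    BACKEND_LIBRARY_PATH = '/usr/local/libexec/bind10/backends/'  # example

    def data_source_lib_file(type_str):
        # Reject clearly bad values, as the C++ helper does.
        if not type_str or type_str == '.so':
            raise ValueError('bad data source type: %r' % type_str)
        # Append '_ds.so' unless the type already names a .so file.
        lib_file = type_str if type_str.endswith('.so') else type_str + '_ds.so'
        # Absolute paths are used as-is; otherwise prepend a directory.
        if type_str.startswith('/'):
            return lib_file
        build_root = os.environ.get('B10_FROM_BUILD')
        if build_root is not None:
            return build_root + '/src/lib/datasrc/.libs/' + lib_file
        return BACKEND_LIBRARY_PATH + lib_file

    # e.g. data_source_lib_file('sqlite3') -> '.../backends/sqlite3_ds.so'
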
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
index 0284067..9d0a762 100644
--- a/src/lib/datasrc/factory.h
+++ b/src/lib/datasrc/factory.h
@@ -68,7 +68,7 @@ public:
     ///             the library path.
     ///
     /// \exception DataSourceLibraryError If the library cannot be found or
-    ///            cannot be loaded.
+    ///            cannot be loaded, or if name is an empty string.
     LibraryContainer(const std::string& name);
 
     /// \brief Destructor
@@ -115,6 +115,15 @@ private:
 /// easy recognition and to reduce potential mistakes.
 /// For example, the sqlite3 implementation has the type 'sqlite3', and the
 /// derived filename 'sqlite3_ds.so'
+/// The value of type can be a specific loadable library; if it already ends
+/// with '.so', the loader will not add '_ds.so'.
+/// It may also be an absolute path; if it starts with '/', nothing is
+/// prepended. If it does not, the loadable library will be taken from the
+/// installation directory; see the value of
+/// isc::datasrc::BACKEND_LIBRARY_PATH in datasrc_config.h for the exact path.
+///
+/// \note When 'B10_FROM_BUILD' is set in the environment, the build
+///       directory is used instead of the install directory.
 ///
 /// There are of course some demands to an implementation, not all of which
 /// can be verified compile-time. It must provide a creator and destructor
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 1bf17f6..a79ee5b 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -850,6 +850,13 @@ InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
     isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
 }
 
+pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+InMemoryClient::getJournalReader(const isc::dns::Name&, uint32_t,
+                                 uint32_t) const
+{
+    isc_throw(isc::NotImplemented, "Journaling isn't supported for "
+              "in memory data source");
+}
 
 namespace {
 // convencience function to add an error message to a list of those
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index a9764fe..b852eb3 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -287,6 +287,10 @@ public:
                                       bool replace, bool journaling = false)
         const;
 
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name& zone, uint32_t begin_serial,
+                     uint32_t end_serial) const;
+
 private:
     // TODO: Do we still need the PImpl if nobody should manipulate this class
     // directly any more (it should be handled through DataSourceClient)?
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 250b46a..08be824 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -71,7 +71,6 @@ public:
         DataSourceError(file, line, what) {}
 };
 
-
 struct SQLite3Parameters;
 
 /**
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 70f2999..6dd6b0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -55,13 +55,6 @@ run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
 run_unittests_SOURCES += rbtree_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
 run_unittests_SOURCES += client_unittest.cc
-if !USE_STATIC_LINK
-# This test uses dynamically loadable module.  It will cause various
-# troubles with static link such as "missing" symbols in the static object
-# for the module.  As a workaround we disable this particualr test
-# in this case.
-run_unittests_SOURCES += factory_unittest.cc
-endif
 
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
@@ -110,3 +103,21 @@ EXTRA_DIST += testdata/test.sqlite3
 EXTRA_DIST += testdata/test.sqlite3.nodiffs
 EXTRA_DIST += testdata/rwtest.sqlite3
 EXTRA_DIST += testdata/diffs.sqlite3
+
+# For the factory unit tests, we need to specify that we want
+# the loadable backend libraries from the build tree, and not from
+# the installation directory. Therefore we build them into a separate
+# binary and call that from check-local with B10_FROM_BUILD set.
+# Also, we only want to do this when static linking is not used,
+# since static linking causes various problems, such as "missing"
+# symbols in the static object for the module.
+if !USE_STATIC_LINK
+noinst_PROGRAMS+=run_unittests_factory
+run_unittests_factory_SOURCES = $(common_sources)
+run_unittests_factory_SOURCES += factory_unittest.cc
+run_unittests_factory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_factory_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+run_unittests_factory_LDADD = $(common_ldadd)
+check-local:
+	B10_FROM_BUILD=${abs_top_builddir} ./run_unittests_factory
+endif
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
index ade6fc7..64ad25f 100644
--- a/src/lib/datasrc/tests/client_unittest.cc
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -12,6 +12,8 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <utility>
+
 #include <datasrc/client.h>
 
 #include <dns/name.h>
@@ -37,6 +39,11 @@ public:
     {
         return (ZoneUpdaterPtr());
     }
+    virtual std::pair<ZoneJournalReader::Result, ZoneJournalReaderPtr>
+    getJournalReader(const isc::dns::Name&, uint32_t, uint32_t) const {
+        isc_throw(isc::NotImplemented, "Journaling isn't supported "
+                  "in Nop data source");
+    }
 };
 
 class ClientTest : public ::testing::Test {
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 007c634..920c9a2 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -12,11 +12,15 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
+#include <stdlib.h>
+
 #include <boost/shared_ptr.hpp>
 #include <boost/lexical_cast.hpp>
 
 #include <gtest/gtest.h>
 
+#include <exceptions/exceptions.h>
+
 #include <dns/name.h>
 #include <dns/rrttl.h>
 #include <dns/rrset.h>
@@ -31,6 +35,7 @@
 #include <testutils/dnsmessage_test.h>
 
 #include <map>
+#include <vector>
 
 using namespace isc::datasrc;
 using namespace std;
@@ -259,7 +264,7 @@ public:
 
     virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
         isc_throw(isc::NotImplemented,
-                  "This database datasource can't be iterated");
+                  "This database datasource doesn't support diffs");
     }
 
     virtual std::string findPreviousName(int, const std::string&) const {
@@ -550,6 +555,29 @@ private:
             }
         }
     };
+    class MockDiffIteratorContext : public IteratorContext {
+        const vector<JournalEntry> diffs_;
+        vector<JournalEntry>::const_iterator it_;
+    public:
+        MockDiffIteratorContext(const vector<JournalEntry>& diffs) :
+            diffs_(diffs), it_(diffs_.begin())
+        {}
+        virtual bool getNext(string (&data)[COLUMN_COUNT]) {
+            if (it_ == diffs_.end()) {
+                return (false);
+            }
+            data[DatabaseAccessor::NAME_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_NAME];
+            data[DatabaseAccessor::TYPE_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TYPE];
+            data[DatabaseAccessor::TTL_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_TTL];
+            data[DatabaseAccessor::RDATA_COLUMN] =
+                (*it_).data_[DatabaseAccessor::DIFF_RDATA];
+            ++it_;
+            return (true);
+        }
+    };
 public:
     virtual IteratorContextPtr getAllRecords(int id) const {
         if (id == READONLY_ZONE_ID) {
@@ -729,12 +757,52 @@ public:
             isc_throw(DataSourceError, "Test error");
         } else {
             journal_entries_->push_back(JournalEntry(id, serial, operation,
-                                                    data));
+                                                     data));
+        }
+    }
+
+    virtual IteratorContextPtr getDiffs(int id, uint32_t start,
+                                        uint32_t end) const
+    {
+        vector<JournalEntry> selected_jnl;
+
+        for (vector<JournalEntry>::const_iterator it =
+                 journal_entries_->begin();
+             it != journal_entries_->end(); ++it)
+        {
+            // For simplicity we assume this method is called for the
+            // "readonly" zone possibly after making updates on the "writable"
+            // copy and committing them.
+            if (id != READONLY_ZONE_ID) {
+                continue;
+            }
+
+            // Note: the following logic is not 100% accurate in terms of
+            // serial number arithmetic; we prefer brevity for testing.
+            // Skip until we see the starting serial.  Once we start
+            // recording, this condition is ignored (to support the
+            // wrap-around case).  Also, it ignores the RR type; it only
+            // checks the versions.
+            if ((*it).serial_ < start && selected_jnl.empty()) {
+                continue;
+            }
+            if ((*it).serial_ > end) { // gone over the end serial. we're done.
+                break;
+            }
+            selected_jnl.push_back(*it);
         }
+
+        // Check if we've found the requested range.  If not, throw.
+        if (selected_jnl.empty() || selected_jnl.front().serial_ != start ||
+            selected_jnl.back().serial_ != end) {
+            isc_throw(NoSuchSerial, "requested diff range is not found");
+        }
+
+        return (IteratorContextPtr(new MockDiffIteratorContext(selected_jnl)));
     }
 
     // Check the journal is as expected and clear the journal
-    void checkJournal(const std::vector<JournalEntry> &expected) {
+    void checkJournal(const std::vector<JournalEntry> &expected) const {
         std::vector<JournalEntry> journal;
         // Clean the journal, but keep local copy to check
         journal.swap(*journal_entries_);
@@ -903,6 +971,24 @@ public:
      * times per test.
      */
     void createClient() {
+        // To make sure we always have an empty diffs table at the beginning
+        // of each test, we re-install the writable data source here.
+        // Note: this is SQLite3 specific and a waste (though otherwise
+        // harmless) for other types of data sources.  If and when we support
+        // more types of data sources in this test framework, we should
+        // probably move this to some specialized templated method specific
+        // to SQLite3 (or, in the longer term, add an API to purge the
+        // diffs table).
+        const char* const install_cmd = INSTALL_PROG " " TEST_DATA_DIR
+            "/rwtest.sqlite3 " TEST_DATA_BUILDDIR
+            "/rwtest.sqlite3.copied";
+        if (system(install_cmd) != 0) {
+            // any exception will do; this is a failure in test setup, but
+            // it's nice to show the command that failed, and it shouldn't
+            // be caught
+            isc_throw(isc::Exception,
+                      "Error setting up; command failed: " << install_cmd);
+        }
+
         current_accessor_ = new ACCESSOR_TYPE();
         is_mock_ = (dynamic_cast<MockAccessor*>(current_accessor_) != NULL);
         client_.reset(new DatabaseClient(qclass_,
@@ -968,6 +1054,48 @@ public:
         }
     }
 
+    void checkJournal(const vector<JournalEntry>& expected) {
+        if (is_mock_) {
+            const MockAccessor* mock_accessor =
+                dynamic_cast<const MockAccessor*>(current_accessor_);
+            mock_accessor->checkJournal(expected);
+        } else {
+            // For other generic databases, retrieve the diff using the
+            // reader class and compare the resulting sequence of RRsets.
+            // For simplicity we only consider the case where the expected
+            // sequence is not empty.
+            ASSERT_FALSE(expected.empty());
+            const Name zone_name(expected.front().
+                                 data_[DatabaseAccessor::DIFF_NAME]);
+            ZoneJournalReaderPtr jnl_reader =
+                client_->getJournalReader(zone_name,
+                                          expected.front().serial_,
+                                          expected.back().serial_).second;
+            ASSERT_TRUE(jnl_reader);
+            ConstRRsetPtr rrset;
+            vector<JournalEntry>::const_iterator it = expected.begin();
+            for (rrset = jnl_reader->getNextDiff();
+                 rrset && it != expected.end();
+                 rrset = jnl_reader->getNextDiff(), ++it) {
+                typedef DatabaseAccessor Accessor;
+                RRsetPtr expected_rrset(
+                    new RRset(Name((*it).data_[Accessor::DIFF_NAME]),
+                              qclass_,
+                              RRType((*it).data_[Accessor::DIFF_TYPE]),
+                              RRTTL((*it).data_[Accessor::DIFF_TTL])));
+                expected_rrset->addRdata(
+                    rdata::createRdata(expected_rrset->getType(),
+                                       expected_rrset->getClass(),
+                                       (*it).data_[Accessor::DIFF_RDATA]));
+                isc::testutils::rrsetCheck(expected_rrset, rrset);
+            }
+            // We should have examined all entries of both expected and
+            // actual data.
+            EXPECT_TRUE(it == expected.end());
+            ASSERT_FALSE(rrset);
+        }
+    }
+
     // Some tests only work for MockAccessor.  We remember whether our accessor
     // is of that type.
     bool is_mock_;
@@ -2800,17 +2928,20 @@ TEST_F(MockDatabaseClientTest, badName) {
 /*
  * Test correct use of the updater with a journal.
  */
-TEST_F(MockDatabaseClientTest, journal) {
-    updater_ = client_->getUpdater(zname_, false, true);
-    updater_->deleteRRset(*soa_);
-    updater_->deleteRRset(*rrset_);
-    soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
-    soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
-                                      "ns1.example.org. admin.example.org. "
-                                      "1235 3600 1800 2419200 7200"));
-    updater_->addRRset(*soa_);
-    updater_->addRRset(*rrset_);
-    ASSERT_NO_THROW(updater_->commit());
+TYPED_TEST(DatabaseClientTest, journal) {
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+    this->updater_->deleteRRset(*this->soa_);
+    this->updater_->deleteRRset(*this->rrset_);
+    this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                               this->rrttl_));
+    this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                            this->soa_->getClass(),
+                                            "ns1.example.org. "
+                                            "admin.example.org. "
+                                            "1235 3600 1800 2419200 7200"));
+    this->updater_->addRRset(*this->soa_);
+    this->updater_->addRRset(*this->rrset_);
+    ASSERT_NO_THROW(this->updater_->commit());
     std::vector<JournalEntry> expected;
     expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                     DatabaseAccessor::DIFF_DELETE,
@@ -2830,21 +2961,21 @@ TEST_F(MockDatabaseClientTest, journal) {
                                     DatabaseAccessor::DIFF_ADD,
                                     "www.example.org.", "A", "3600",
                                     "192.0.2.2"));
-    current_accessor_->checkJournal(expected);
+    this->checkJournal(expected);
 }
 
 /*
  * Push multiple delete-add sequences. Checks it is allowed and all is
  * saved.
  */
-TEST_F(MockDatabaseClientTest, journalMultiple) {
+TYPED_TEST(DatabaseClientTest, journalMultiple) {
     std::vector<JournalEntry> expected;
-    updater_ = client_->getUpdater(zname_, false, true);
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
     std::string soa_rdata = "ns1.example.org. admin.example.org. "
         "1234 3600 1800 2419200 7200";
     for (size_t i(1); i < 100; ++ i) {
         // Remove the old SOA
-        updater_->deleteRRset(*soa_);
+        this->updater_->deleteRRset(*this->soa_);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
                                         DatabaseAccessor::DIFF_DELETE,
                                         "example.org.", "SOA", "3600",
@@ -2852,19 +2983,21 @@ TEST_F(MockDatabaseClientTest, journalMultiple) {
         // Create a new SOA
         soa_rdata = "ns1.example.org. admin.example.org. " +
             lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
-        soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
-        soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
-                                          soa_rdata));
+        this->soa_.reset(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                   this->rrttl_));
+        this->soa_->addRdata(rdata::createRdata(this->soa_->getType(),
+                                                this->soa_->getClass(),
+                                                soa_rdata));
         // Add the new SOA
-        updater_->addRRset(*soa_);
+        this->updater_->addRRset(*this->soa_);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
                                         DatabaseAccessor::DIFF_ADD,
                                         "example.org.", "SOA", "3600",
                                         soa_rdata));
     }
-    ASSERT_NO_THROW(updater_->commit());
+    ASSERT_NO_THROW(this->updater_->commit());
     // Check the journal contains everything.
-    current_accessor_->checkJournal(expected);
+    this->checkJournal(expected);
 }
 
 /*
@@ -2872,46 +3005,50 @@ TEST_F(MockDatabaseClientTest, journalMultiple) {
  *
  * Note that we implicitly test in other test cases (those for add and
  * delete) that if journaling is disabled, the order is not enforced.
+ *
+ * In this test we don't check with the real databases, as this case
+ * shouldn't involve backend-specific behavior.
  */
 TEST_F(MockDatabaseClientTest, journalBadSequence) {
     std::vector<JournalEntry> expected;
     {
         SCOPED_TRACE("Delete A before SOA");
-        updater_ = client_->getUpdater(zname_, false, true);
-        EXPECT_THROW(updater_->deleteRRset(*rrset_), isc::BadValue);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->deleteRRset(*this->rrset_),
+                     isc::BadValue);
         // Make sure the journal is empty now
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add before delete");
-        updater_ = client_->getUpdater(zname_, false, true);
-        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
         // Make sure the journal is empty now
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add A before SOA");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // But we miss the add SOA here
-        EXPECT_THROW(updater_->addRRset(*rrset_), isc::BadValue);
+        EXPECT_THROW(this->updater_->addRRset(*this->rrset_), isc::BadValue);
         // Make sure the journal contains only the first one
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                         DatabaseAccessor::DIFF_DELETE,
                                         "example.org.", "SOA", "3600",
                                         "ns1.example.org. admin.example.org. "
                                         "1234 3600 1800 2419200 7200"));
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Commit before add");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Commit at the wrong time
         EXPECT_THROW(updater_->commit(), isc::BadValue);
         current_accessor_->checkJournal(expected);
@@ -2919,29 +3056,29 @@ TEST_F(MockDatabaseClientTest, journalBadSequence) {
 
     {
         SCOPED_TRACE("Delete two SOAs");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Delete the SOA again
-        EXPECT_THROW(updater_->deleteRRset(*soa_), isc::BadValue);
-        current_accessor_->checkJournal(expected);
+        EXPECT_THROW(this->updater_->deleteRRset(*this->soa_), isc::BadValue);
+        this->checkJournal(expected);
     }
 
     {
         SCOPED_TRACE("Add two SOAs");
-        updater_ = client_->getUpdater(zname_, false, true);
+        this->updater_ = this->client_->getUpdater(this->zname_, false, true);
         // So far OK
-        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->deleteRRset(*this->soa_));
         // Still OK
-        EXPECT_NO_THROW(updater_->addRRset(*soa_));
+        EXPECT_NO_THROW(this->updater_->addRRset(*this->soa_));
         // But this one is added again
-        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        EXPECT_THROW(this->updater_->addRRset(*this->soa_), isc::BadValue);
         expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
                                         DatabaseAccessor::DIFF_ADD,
                                         "example.org.", "SOA", "3600",
                                         "ns1.example.org. admin.example.org. "
                                         "1234 3600 1800 2419200 7200"));
-        current_accessor_->checkJournal(expected);
+        this->checkJournal(expected);
     }
 }
 
@@ -2949,8 +3086,9 @@ TEST_F(MockDatabaseClientTest, journalBadSequence) {
  * Test that it rejects storing a journal when we request it together with
  * erasing the whole zone.
  */
-TEST_F(MockDatabaseClientTest, journalOnErase) {
-    EXPECT_THROW(client_->getUpdater(zname_, true, true), isc::BadValue);
+TYPED_TEST(DatabaseClientTest, journalOnErase) {
+    EXPECT_THROW(this->client_->getUpdater(this->zname_, true, true),
+                 isc::BadValue);
 }
 
 /*
@@ -2974,4 +3112,149 @@ TEST_F(MockDatabaseClientTest, journalException) {
     EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
 }
 
+//
+// Tests for the ZoneJournalReader
+//
+
+// Install a simple, commonly used diff sequence: making an update from one
+// SOA to another.  Return the end SOA RRset for the convenience of the caller.
+ConstRRsetPtr
+makeSimpleDiff(DataSourceClient& client, const Name& zname,
+               const RRClass& rrclass, ConstRRsetPtr begin_soa)
+{
+    ZoneUpdaterPtr updater = client.getUpdater(zname, false, true);
+    updater->deleteRRset(*begin_soa);
+    RRsetPtr soa_end(new RRset(zname, rrclass, RRType::SOA(), RRTTL(3600)));
+    soa_end->addRdata(rdata::createRdata(RRType::SOA(), rrclass,
+                                         "ns1.example.org. admin.example.org. "
+                                         "1235 3600 1800 2419200 7200"));
+    updater->addRRset(*soa_end);
+    updater->commit();
+
+    return (soa_end);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReader) {
+    // Check the simple case made by makeSimpleDiff.
+    ConstRRsetPtr soa_end = makeSimpleDiff(*this->client_, this->zname_,
+                                           this->qclass_, this->soa_);
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1234, 1235);
+    EXPECT_EQ(ZoneJournalReader::SUCCESS, result.first);
+    ZoneJournalReaderPtr jnl_reader = result.second;
+    ASSERT_TRUE(jnl_reader);
+    ConstRRsetPtr rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(this->soa_, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_TRUE(rrset);
+    isc::testutils::rrsetCheck(soa_end, rrset);
+    rrset = jnl_reader->getNextDiff();
+    ASSERT_FALSE(rrset);
+
+    // Once it reaches the end of the sequence, any further read attempt
+    // will result in an exception.
+    EXPECT_THROW(jnl_reader->getNextDiff(), isc::InvalidOperation);
+}
+
+TYPED_TEST(DatabaseClientTest, readLargeJournal) {
+    // Similar to journalMultiple, but checks it at a higher level.
+
+    this->updater_ = this->client_->getUpdater(this->zname_, false, true);
+
+    vector<ConstRRsetPtr> expected;
+    for (size_t i = 0; i < 100; ++i) {
+        // Create the old SOA and remove it, and record it in the expected list
+        RRsetPtr rrset1(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        string soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+        rrset1->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->deleteRRset(*rrset1);
+        expected.push_back(rrset1);
+
+        // Create a new SOA, add it, and record it.
+        RRsetPtr rrset2(new RRset(this->zname_, this->qclass_, RRType::SOA(),
+                                  this->rrttl_));
+        soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i + 1) +
+            " 3600 1800 2419200 7200";
+        rrset2->addRdata(rdata::createRdata(RRType::SOA(), this->qclass_,
+                                            soa_rdata));
+        this->updater_->addRRset(*rrset2);
+        expected.push_back(rrset2);
+    }
+    this->updater_->commit();
+
+    ZoneJournalReaderPtr jnl_reader(this->client_->getJournalReader(
+                                        this->zname_, 1234, 1334).second);
+    ConstRRsetPtr actual;
+    int i = 0;
+    while ((actual = jnl_reader->getNextDiff()) != NULL) {
+        isc::testutils::rrsetCheck(expected.at(i++), actual);
+    }
+    EXPECT_EQ(expected.size(), i); // we should have eaten all expected data
+}
+
+TYPED_TEST(DatabaseClientTest, readJournalForNoRange) {
+    makeSimpleDiff(*this->client_, this->zname_, this->qclass_, this->soa_);
+
+    // The specified range does not exist in the diff storage.  The factory
+    // method should result in NO_SUCH_VERSION
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(this->zname_, 1200, 1235);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_VERSION, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+TYPED_TEST(DatabaseClientTest, journalReaderForNXZone) {
+    pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+        this->client_->getJournalReader(Name("nosuchzone"), 0, 1);
+    EXPECT_EQ(ZoneJournalReader::NO_SUCH_ZONE, result.first);
+    EXPECT_FALSE(result.second);
+}
+
+// A helper function for journalWithBadData.  It installs a simple diff
+// from one serial (of 'begin') to another ('begin' + 1), tweaking a specified
+// field of data with some invalid value.
+void
+installBadDiff(MockAccessor& accessor, uint32_t begin,
+               DatabaseAccessor::DiffRecordParams modify_param,
+               const char* const data)
+{
+    string data1[] = {"example.org.", "SOA", "3600", "ns. root. 1 1 1 1 1"};
+    string data2[] = {"example.org.", "SOA", "3600", "ns. root. 2 1 1 1 1"};
+    data1[modify_param] = data;
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin,
+                           DatabaseAccessor::DIFF_DELETE, data1);
+    accessor.addRecordDiff(READONLY_ZONE_ID, begin + 1,
+                           DatabaseAccessor::DIFF_ADD, data2);
+}
+
+TEST_F(MockDatabaseClientTest, journalWithBadData) {
+    MockAccessor& mock_accessor =
+        dynamic_cast<MockAccessor&>(*current_accessor_);
+
+    // One of the fields from the data source is broken as an RR parameter.
+    // The journal reader should still be constructed, but getNextDiff()
+    // should result in an exception.
+    installBadDiff(mock_accessor, 1, DatabaseAccessor::DIFF_NAME,
+                   "example..org");
+    installBadDiff(mock_accessor, 3, DatabaseAccessor::DIFF_TYPE,
+                   "bad-rrtype");
+    installBadDiff(mock_accessor, 5, DatabaseAccessor::DIFF_TTL,
+                   "bad-ttl");
+    installBadDiff(mock_accessor, 7, DatabaseAccessor::DIFF_RDATA,
+                   "bad rdata");
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 1, 2).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 3, 4).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 5, 6).
+                 second->getNextDiff(), DataSourceError);
+    EXPECT_THROW(this->client_->getJournalReader(this->zname_, 7, 8).
+                 second->getNextDiff(), DataSourceError);
+}
+
 }
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
index 0133508..e98f9bc 100644
--- a/src/lib/datasrc/tests/factory_unittest.cc
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -14,6 +14,7 @@
 
 #include <boost/scoped_ptr.hpp>
 
+#include <datasrc/datasrc_config.h>
 #include <datasrc/factory.h>
 #include <datasrc/data_source.h>
 #include <datasrc/sqlite3_accessor.h>
@@ -30,6 +31,70 @@ std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
 
 namespace {
 
+// Note: this helper only checks the error that is received, up to the length
+// of the expected string, so the comparison will trivially pass if you give
+// it an empty expected_error.
+void
+pathtestHelper(const std::string& file, const std::string& expected_error) {
+    std::string error;
+    try {
+        DataSourceClientContainer(file, ElementPtr());
+    } catch (const DataSourceLibraryError& dsle) {
+        error = dsle.what();
+    }
+    ASSERT_LT(expected_error.size(), error.size());
+    EXPECT_EQ(expected_error, error.substr(0, expected_error.size()));
+}
+
+TEST(FactoryTest, paths) {
+    // Test whether the paths are made absolute if they are not,
+    // by inspecting the error that is raised when they are wrong
+    const std::string error("dlopen failed for ");
+    // With the current implementation, we can safely assume this has
+    // been set for this test (as the loader would otherwise also fail
+    // unless the loadable backend library happens to be installed)
+    const std::string builddir(getenv("B10_FROM_BUILD"));
+
+    // Absolute and ending with .so should have no change
+    pathtestHelper("/no_such_file.so", error + "/no_such_file.so");
+
+    // If not ending in .so, it should get the _ds.so suffix
+    pathtestHelper("/no_such_file", error + "/no_such_file_ds.so");
+
+    // If not starting with /, a path should be prepended. For this test that
+    // means the build directory as set in B10_FROM_BUILD.
+    pathtestHelper("no_such_file.so", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so");
+    pathtestHelper("no_such_file", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file_ds.so");
+
+    // Some tests with '.so' in the name itself
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something", error +
+                   "/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something.so", error +
+                   "/no_such_file.so.something.so");
+    pathtestHelper("/no_such_file.so.so", error +
+                   "/no_such_file.so.so");
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+
+    // Temporarily unset B10_FROM_BUILD to see that BACKEND_LIBRARY_PATH
+    // is used
+    unsetenv("B10_FROM_BUILD");
+    pathtestHelper("no_such_file.so", error + BACKEND_LIBRARY_PATH +
+                   "no_such_file.so");
+    // Put it back just in case
+    setenv("B10_FROM_BUILD", builddir.c_str(), 1);
+
+    // Test some bad input values
+    ASSERT_THROW(DataSourceClientContainer("", ElementPtr()),
+                 DataSourceLibraryError);
+    ASSERT_THROW(DataSourceClientContainer(".so", ElementPtr()),
+                 DataSourceLibraryError);
+}
+
 TEST(FactoryTest, sqlite3ClientBadConfig) {
     // We start out by building the configuration data bit by bit,
     // testing each form of 'bad config', until we have a good one.
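
Illustration only: the path-resolution rules the paths test above exercises,
reconstructed from its expected error strings.  The helper name is
hypothetical and this is not the factory's actual implementation;
BACKEND_LIBRARY_PATH comes from datasrc_config.h and is assumed to end with
a path separator.

    #include <cstdlib>
    #include <string>

    #include <datasrc/datasrc_config.h>   // provides BACKEND_LIBRARY_PATH

    // Turn a data source type name into the library path handed to dlopen().
    std::string
    resolveLibraryPath(const std::string& type) {
        std::string path = type;

        // Append the "_ds.so" suffix unless the name already ends in ".so".
        const std::string suffix(".so");
        if (path.size() < suffix.size() ||
            path.compare(path.size() - suffix.size(), suffix.size(),
                         suffix) != 0) {
            path += "_ds.so";
        }

        // Relative names are searched in the build tree when B10_FROM_BUILD
        // is set, otherwise in the installed backend library directory.
        if (!path.empty() && path[0] != '/') {
            const char* const build = std::getenv("B10_FROM_BUILD");
            if (build != NULL) {
                path = std::string(build) + "/src/lib/datasrc/.libs/" + path;
            } else {
                path = std::string(BACKEND_LIBRARY_PATH) + path;
            }
        }
        return (path);
    }
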
diff --git a/src/lib/datasrc/tests/testdata/Makefile.am b/src/lib/datasrc/tests/testdata/Makefile.am
index 64ae955..6a35fe3 100644
--- a/src/lib/datasrc/tests/testdata/Makefile.am
+++ b/src/lib/datasrc/tests/testdata/Makefile.am
@@ -1,6 +1 @@
 CLEANFILES = *.copied
-BUILT_SOURCES = rwtest.sqlite3.copied
-
-# We use install-sh with the -m option to make sure it's writable
-rwtest.sqlite3.copied: $(srcdir)/rwtest.sqlite3
-	$(top_srcdir)/install-sh -m 644 $(srcdir)/rwtest.sqlite3 $@
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index afe06d9..e028bea 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -563,6 +563,98 @@ public:
 /// \brief A pointer-like type pointing to a \c ZoneUpdater object.
 typedef boost::shared_ptr<ZoneUpdater> ZoneUpdaterPtr;
 
+/// The base class for retrieving differences between two versions of a zone.
+///
+/// On construction, each derived class object will internally set up
+/// retrieving sequences of differences between two specific versions of
+/// a specific zone managed in a particular data source.  So the constructor
+/// of a derived class would normally take parameters to identify the zone
+/// and the two versions for which the differences should be retrieved.
+/// See \c DataSourceClient::getJournalReader for more concrete details
+/// used in this API.
+///
+/// Once constructed, an object of this class will act like an iterator
+/// over the sequences.  Every time the \c getNextDiff() method is called
+/// it returns one element of the differences in the form of an \c RRset
+/// until it reaches the end of the entire sequence.
+class ZoneJournalReader {
+public:
+    /// Result codes used by a factory method for \c ZoneJournalReader
+    enum Result {
+        SUCCESS, ///< A \c ZoneJournalReader object successfully created
+        NO_SUCH_ZONE, ///< Specified zone does not exist in the data source
+        NO_SUCH_VERSION ///< Specified versions do not exist in the diff storage
+    };
+
+protected:
+    /// The default constructor.
+    ///
+    /// This is intentionally defined as protected to ensure that this base
+    /// class is never instantiated directly.
+    ZoneJournalReader() {}
+
+public:
+    /// The destructor
+    virtual ~ZoneJournalReader() {}
+
+    /// Return the next RR of the difference sequences.
+    ///
+    /// In this API, the difference between two versions of a zone is
+    /// conceptually represented as IXFR-style difference sequences:
+    /// Each difference sequence is a sequence of RRs: an older version of
+    /// SOA (to be deleted), zero or more other deleted RRs, the
+    /// post-transaction SOA (to be added), and zero or more other
+    /// added RRs.  (Note, however, that the underlying data source
+    /// implementation may or may not represent the difference as a
+    /// straightforward realization of this concept.  The mapping between
+    /// the conceptual difference and the actual implementation is hidden
+    /// in each derived class).
+    ///
+    /// This method provides an application with a higher level interface
+    /// to retrieve the difference along with the conceptual model: the
+    /// \c ZoneJournalReader object iterates over the entire sequences
+    /// from the beginning SOA (which is to be deleted) to the last added
+    /// RR that follows the ending SOA, and each call to this method returns
+    /// one RR in the form of an \c RRset that contains exactly one RDATA
+    /// in the order of the sequences.
+    ///
+    /// Note that the ordering of the sequences specifies the semantics of
+    /// each difference: add or delete.  For example, the first RR is to
+    /// be deleted, and the last RR is to be added.  So the return value
+    /// of this method does not explicitly indicate whether the RR is to be
+    /// added or deleted.
+    ///
+    /// This method ensures the returned \c RRset represents an RR, that is,
+    /// it contains exactly one RDATA.  However, it does not necessarily
+    /// ensure that the resulting sequences are in IXFR-style form.
+    /// For example, the first RR is supposed to be an SOA, and that should
+    /// normally be the case, but this interface does not require the
+    /// derived class implementation to ensure it.  Normally the
+    /// differences are expected to be stored using this API (via a
+    /// \c ZoneUpdater object), and as long as that is the case and the
+    /// underlying implementation follows the requirements of the API, the
+    /// result of this method should be a valid IXFR-style sequence.
+    /// So this API does not mandate the almost redundant check as part of
+    /// the interface.  If the application needs to be 100% sure, it must
+    /// check the resulting sequence itself.
+    ///
+    /// Once the object reaches the end of the sequences, this method returns
+    /// \c Null.  Any subsequent call will result in an exception of
+    /// class \c InvalidOperation.
+    ///
+    /// \exception InvalidOperation The method is called beyond the end of
+    /// the difference sequences.
+    /// \exception DataSourceError Underlying data is broken and the RR
+    /// cannot be created or other low level data source error.
+    ///
+    /// \return An \c RRset that contains one RDATA corresponding to the
+    /// next difference in the sequences.
+    virtual isc::dns::ConstRRsetPtr getNextDiff() = 0;
+};
+
+/// \brief A pointer-like type pointing to a \c ZoneJournalReader object.
+typedef boost::shared_ptr<ZoneJournalReader> ZoneJournalReaderPtr;
+
 } // end of datasrc
 } // end of isc
 
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 64dda17..3991033 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -14,6 +14,7 @@ libdhcp_la_SOURCES += option.cc option.h
 libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
 libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
 libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp_la_SOURCES += option4_addrlst.cc option4_addrlst.h
 libdhcp_la_SOURCES += dhcp6.h dhcp4.h
 libdhcp_la_SOURCES += pkt6.cc pkt6.h
 libdhcp_la_SOURCES += pkt4.cc pkt4.h
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
index b95a427..f84e495 100644
--- a/src/lib/dhcp/libdhcp.cc
+++ b/src/lib/dhcp/libdhcp.cc
@@ -17,6 +17,7 @@
 #include <util/buffer.h>
 #include <dhcp/libdhcp.h>
 #include "config.h"
+#include <dhcp/dhcp4.h>
 #include <dhcp/dhcp6.h>
 #include <dhcp/option.h>
 #include <dhcp/option6_ia.h>
@@ -90,8 +91,17 @@ LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
     size_t offset = 0;
 
     // 2 - header of DHCPv4 option
-    while (offset + 2 <= buf.size()) {
+    while (offset + 1 <= buf.size()) {
         uint8_t opt_type = buf[offset++];
+        if (offset == buf.size()) {
+            // The type octet we just read was the last octet in the buffer;
+            // that is only valid if it is the END marker.
+            if (opt_type == DHO_END) {
+                return; // just return; no need to store the DHO_END option
+            } else {
+                isc_throw(OutOfRange, "Attempt to parse truncated option "
+                          << static_cast<int>(opt_type));
+            }
+        }
+
         uint8_t opt_len =  buf[offset++];
         if (offset + opt_len > buf.size() ) {
             isc_throw(OutOfRange, "Option parse failed. Tried to parse "
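
For context on the hunk above: a DHCPv4 options area is a sequence of
(type, length, value) triples terminated by a single END octet (value 255,
DHO_END).  A simplified standalone sketch of walking such a buffer, showing
the intended handling of the trailing END octet (this is not the library
code itself; the function name is made up for the example):

    #include <stdint.h>
    #include <stdexcept>
    #include <vector>

    // Counts the options in a DHCPv4 options buffer that ends with a single
    // END octet (255).  Each option is a type octet, a length octet and
    // 'length' data octets.
    size_t
    countOptions4(const std::vector<uint8_t>& buf) {
        const uint8_t END = 255;    // DHO_END
        size_t count = 0;
        size_t offset = 0;
        while (offset < buf.size()) {
            const uint8_t opt_type = buf[offset++];
            if (offset == buf.size()) {
                // The type octet was the last octet of the buffer; that is
                // only valid if it is the END marker.
                if (opt_type == END) {
                    return (count);
                }
                throw std::runtime_error("truncated option");
            }
            const uint8_t opt_len = buf[offset++];
            if (offset + opt_len > buf.size()) {
                throw std::runtime_error("option data past end of buffer");
            }
            offset += opt_len;      // skip over the option data
            ++count;
        }
        return (count);
    }
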
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index daef288..20dd97a 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -128,23 +128,6 @@ Option::pack4(isc::util::OutputBuffer& buf) {
 }
 
 unsigned int
-Option::pack4(boost::shared_array<uint8_t>& buf,
-             unsigned int buf_len,
-             unsigned int offset) {
-    if (offset + len() > buf_len) {
-        isc_throw(OutOfRange, "Failed to pack v4 option=" <<
-                  type_ << ",len=" << len() << ": too small buffer.");
-    }
-    uint8_t *ptr = &buf[offset];
-    ptr[0] = type_;
-    ptr[1] = len() - getHeaderLen();
-    ptr += 2;
-    memcpy(ptr, &data_[0], data_.size());
-
-    return offset + len();
-}
-
-unsigned int
 Option::pack6(boost::shared_array<uint8_t>& buf,
              unsigned int buf_len,
              unsigned int offset) {
@@ -220,7 +203,7 @@ Option::unpack6(const boost::shared_array<uint8_t>& buf,
 
 /// Returns length of the complete option (data length + DHCPv4/DHCPv6
 /// option header)
-unsigned short
+uint16_t
 Option::len() {
 
     // length of the whole option is header and data stored in this option...
@@ -295,17 +278,7 @@ std::string Option::toText(int indent /* =0 */ ) {
     return tmp.str();
 }
 
-unsigned short
-Option::getType() {
-    return type_;
-}
-
-const std::vector<uint8_t>&
-Option::getData() {
-    return (data_);
-}
-
-unsigned short
+uint16_t
 Option::getHeaderLen() {
     switch (universe_) {
     case V4:
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 3822cf0..088d094 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -178,20 +178,19 @@ public:
     /// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
     ///
     /// @return option type
-    unsigned short
-    getType();
+    unsigned short getType() { return (type_); }
 
     /// Returns length of the complete option (data length + DHCPv4/DHCPv6
     /// option header)
     ///
     /// @return length of the option
-    virtual unsigned short
+    virtual uint16_t
     len();
 
     /// @brief Returns length of header (2 for v4, 4 for v6)
     ///
     /// @return length of option header
-    virtual unsigned short
+    virtual uint16_t
     getHeaderLen();
 
     /// returns if option is valid (e.g. option may be truncated)
@@ -202,9 +201,9 @@ public:
 
     /// Returns pointer to actual data.
     ///
-    /// @return pointer to actual data (or NULL if there is no data)
-    virtual const std::vector<uint8_t>&
-    getData();
+    /// @return pointer to actual data (or reference to an empty vector
+    ///         if there is no data)
+    virtual const std::vector<uint8_t>& getData() { return (data_); }
 
     /// Adds a sub-option.
     ///
@@ -242,20 +241,6 @@ public:
     ~Option();
 
 protected:
-
-    /// Builds raw (over-wire) buffer of this option, including all
-    /// defined suboptions. Version for building DHCPv4 options.
-    ///
-    /// @param buf output buffer (built options will be stored here)
-    /// @param buf_len buffer length (used for buffer overflow checks)
-    /// @param offset offset from start of the buf buffer
-    ///
-    /// @return offset to the next byte after last used byte
-    virtual unsigned int
-    pack4(boost::shared_array<uint8_t>& buf,
-          unsigned int buf_len,
-          unsigned int offset);
-
     /// Builds raw (over-wire) buffer of this option, including all
     /// defined suboptions. Version for building DHCPv4 options.
     ///
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
new file mode 100644
index 0000000..88eb915
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io_utilities.h>
+#include <dhcp/option4_addrlst.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace isc::asiolink;
+
+Option4AddrLst::Option4AddrLst(uint8_t type)
+    :Option(V4, type) {
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const AddressContainer& addrs)
+    :Option(V4, type) {
+    setAddresses(addrs);
+    // don't set addrs_ directly. setAddresses() will do additional checks.
+}
+
+
+Option4AddrLst::Option4AddrLst(uint8_t type,
+                               vector<uint8_t>::const_iterator first,
+                               vector<uint8_t>::const_iterator last)
+    :Option(V4, type) {
+    if ( (distance(first, last) % V4ADDRESS_LEN) ) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_
+                  << " has invalid length=" << distance(first, last)
+                  << ", must be divisible by 4.");
+    }
+
+    while (first != last) {
+        const uint8_t* ptr = &(*first);
+        addAddress(IOAddress(readUint32(ptr)));
+        first += V4ADDRESS_LEN;
+    }
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
+    :Option(V4, type) {
+    setAddress(addr);
+}
+
+void
+Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+
+    if (addrs_.size() * V4ADDRESS_LEN > 255) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big: "
+                  << "at most 255 bytes are supported.");
+        /// TODO Larger options can be stored as separate instances
+        /// of DHCPv4 options. Clients MUST concatenate them.
+        /// Fortunately, there are no such large options used today.
+    }
+
+    buf.writeUint8(type_);
+    buf.writeUint8(len() - getHeaderLen());
+
+    AddressContainer::const_iterator addr = addrs_.begin();
+
+    while (addr != addrs_.end()) {
+        buf.writeUint32(*addr);
+        ++addr;
+    }
+}
+
+void Option4AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.clear();
+    addAddress(addr);
+}
+
+void Option4AddrLst::setAddresses(const AddressContainer& addrs) {
+
+    // Do not copy it as a whole. addAddress() does sanity checks.
+    // i.e. throw if someone tries to set IPv6 address.
+    addrs_.clear();
+    for (AddressContainer::const_iterator addr = addrs.begin();
+         addr != addrs.end(); ++addr) {
+        addAddress(*addr);
+    }
+}
+
+
+void Option4AddrLst::addAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.push_back(addr);
+}
+
+uint16_t Option4AddrLst::len() {
+
+    // Returns length of the complete option (option header + data length)
+    return (getHeaderLen() + addrs_.size() * V4ADDRESS_LEN);
+}
+
+std::string Option4AddrLst::toText(int indent /* =0 */ ) {
+    std::stringstream tmp;
+
+    for (int i = 0; i < indent; i++) {
+        tmp << " ";
+    }
+
+    tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ":";
+
+    for (AddressContainer::const_iterator addr = addrs_.begin();
+         addr != addrs_.end(); ++addr) {
+        tmp << " " << (*addr);
+    }
+
+    return tmp.str();
+}
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
new file mode 100644
index 0000000..c795805
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -0,0 +1,167 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION4_ADDRLST_H_
+#define OPTION4_ADDRLST_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+
+/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
+///
+/// This class handles a list of IPv4 addresses. An example of such an option
+/// is the dns-servers option. It can also be used to handle a single address.
+class Option4AddrLst : public isc::dhcp::Option {
+public:
+
+    /// Defines a collection of IPv4 addresses.
+    typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+    /// @brief Constructor, creates an option with empty list of addresses.
+    ///
+    /// Creates empty option that can hold addresses. Addresses can be added
+    /// with addAddress(), setAddress() or setAddresses().
+    ///
+    /// @param type option type
+    Option4AddrLst(uint8_t type);
+
+    /// @brief Constructor, creates an option with a list of addresses.
+    ///
+    /// Creates an option that contains specified list of IPv4 addresses.
+    ///
+    /// @param type option type
+    /// @param addrs container with a list of addresses
+    Option4AddrLst(uint8_t type, const AddressContainer& addrs);
+
+    /// @brief Constructor, creates an option with a single address.
+    ///
+    /// Creates an option that contains a single address.
+    ///
+    /// @param type option type
+    /// @param addr a single address, stored as a one-element address list
+    Option4AddrLst(uint8_t type, const isc::asiolink::IOAddress& addr);
+
+    /// @brief Constructor, used for received options.
+    ///
+    /// TODO: This can be templated to use different containers, not just
+    /// vector. Prototype should look like this:
+    /// template<typename InputIterator> Option(Universe u, uint16_t type,
+    /// InputIterator first, InputIterator last);
+    ///
+    /// Example usage:
+    ///
+    ///    vector<int8_t> myData;
+    ///    new Option(V4, 123, myData.begin()+1, myData.end()-1);
+    ///
+    /// This will create a DHCPv4 option of type 123 that contains data from
+    /// the trimmed (first and last byte removed) myData vector.
+    ///
+    /// @param type option type (0-255, as this is a DHCPv4-only option)
+    /// @param first iterator to the first element that should be copied
+    /// @param last iterator to the next element after the last one
+    ///        to be copied.
+    Option4AddrLst(uint8_t type, std::vector<uint8_t>::const_iterator first,
+           std::vector<uint8_t>::const_iterator last);
+
+    /// @brief Writes option in a wire-format to a buffer.
+    ///
+    /// Method will throw if option storing fails for some reason.
+    ///
+    /// TODO Once old (DHCPv6) implementation is rewritten,
+    /// unify pack4() and pack6() and rename them to just pack().
+    ///
+    /// @param buf output buffer (option will be stored there)
+    virtual void
+    pack4(isc::util::OutputBuffer& buf);
+
+    /// Returns string representation of the option.
+    ///
+    /// @param indent number of spaces before printing text
+    ///
+    /// @return string with text representation.
+    virtual std::string
+    toText(int indent = 0);
+
+    /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+    /// option header)
+    ///
+    /// @return length of the option
+    virtual uint16_t len();
+
+    /// @brief Returns vector with addresses.
+    ///
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
+    ///
+    /// @return address container with addresses
+    AddressContainer
+    getAddresses() { return addrs_; };
+
+    /// @brief Sets the address list.
+    ///
+    /// Clears the existing list of addresses and replaces it with the
+    /// addresses from the provided container. See addAddress() if you want
+    /// to append an address to the existing list, or setAddress() if you
+    /// want to set a single address.
+    ///
+    /// All passed addresses must be IPv4 addresses. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addrs address collection to be set
+    void setAddresses(const AddressContainer& addrs);
+
+    /// @brief Clears address list and sets a single address.
+    ///
+    /// Clears the existing list of addresses and adds a single address to
+    /// that list. This is a very convenient method for options that are
+    /// supposed to carry only a single address. See addAddress() if you want
+    /// to append an address to the existing list, or setAddresses() if you
+    /// want to set the whole list at once.
+    ///
+    /// The passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be set as 1-element address list
+    void setAddress(const isc::asiolink::IOAddress& addr);
+
+    /// @brief Adds address to existing list of addresses.
+    ///
+    /// Adds a single address to that list. See setAddress() if you want to
+    /// define only a single address or setAddresses() if you want to
+    /// set the whole list at once.
+    ///
+    /// The passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be added to the existing list
+    void addAddress(const isc::asiolink::IOAddress& addr);
+
+protected:
+    /// contains list of addresses
+    AddressContainer addrs_;
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
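
A brief usage sketch of the new Option4AddrLst class (illustration only; the
option type and the addresses are arbitrary examples): build a
domain-name-servers option from two addresses and serialize it to wire
format.

    #include <asiolink/io_address.h>
    #include <dhcp/dhcp4.h>
    #include <dhcp/option4_addrlst.h>
    #include <util/buffer.h>

    using namespace isc::asiolink;
    using namespace isc::dhcp;
    using namespace isc::util;

    void
    buildDnsServersOption() {
        Option4AddrLst::AddressContainer servers;
        servers.push_back(IOAddress("192.0.2.3"));
        servers.push_back(IOAddress("192.0.2.4"));

        // The constructor goes through setAddresses(), which rejects
        // anything that is not an IPv4 address by throwing BadValue.
        Option4AddrLst dns(DHO_DOMAIN_NAME_SERVERS, servers);

        OutputBuffer buf(100);
        dns.pack4(buf);    // writes type, length, then 4 octets per address
        // buf.getLength() == dns.len() == 2 (header) + 2 * 4 (addresses)
    }
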
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
index fc981fa..9be3810 100644
--- a/src/lib/dhcp/option6_addrlst.cc
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -50,6 +50,10 @@ Option6AddrLst::Option6AddrLst(unsigned short type,
 
 void
 Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET6) {
+        isc_throw(BadValue, "Can't store non-IPv6 address in Option6AddrLst option");
+    }
+
     addrs_.clear();
     addrs_.push_back(addr);
 }
@@ -128,7 +132,7 @@ std::string Option6AddrLst::toText(int indent /* =0 */) {
     return tmp.str();
 }
 
-unsigned short Option6AddrLst::len() {
+uint16_t Option6AddrLst::len() {
 
     return (OPTION6_HDR_LEN + addrs_.size()*16);
 }
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
index c5b32af..a73dc55 100644
--- a/src/lib/dhcp/option6_addrlst.h
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -16,17 +16,16 @@
 #define OPTION6_ADDRLST_H_
 
 #include <vector>
-#include "asiolink/io_address.h"
-#include "dhcp/option.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
 
 namespace isc {
 namespace dhcp {
 
-/// @brief Option class for handling list of IPv6 addresses.
+/// @brief DHCPv6 Option class for handling list of IPv6 addresses.
 ///
 /// This class handles a list of IPv6 addresses. An example of such option
 /// is dns-servers option. It can also be used to handle single address.
-///
 class Option6AddrLst: public Option {
 
 public:
@@ -105,17 +104,17 @@ public:
 
     /// @brief Returns vector with addresses.
     ///
-    /// As user may want to use/modify this list, it is better to return
-    /// a copy rather than const reference to the original. This is
-    /// usually one or two addresses long, so it is not a big deal.
-    ///
-    /// @return vector with addresses
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
     ///
+    /// @return address container with addresses
     AddressContainer
     getAddresses() { return addrs_; };
 
     // returns data length (data length + DHCPv4/DHCPv6 option header)
-    virtual unsigned short len();
+    virtual uint16_t len();
 
 protected:
     AddressContainer addrs_;
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index 46daee1..209f500 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -77,7 +77,7 @@ Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
     if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
         isc_throw(OutOfRange, "Option " << type_ << " truncated");
     }
-    
+
     iaid_ = readUint32(&buf[offset]);
     offset += sizeof(uint32_t);
 
@@ -121,9 +121,9 @@ std::string Option6IA::toText(int indent /* = 0*/) {
     return tmp.str();
 }
 
-unsigned short Option6IA::len() {
+uint16_t Option6IA::len() {
 
-    unsigned short length = OPTION6_HDR_LEN /*header (4)*/ +
+    uint16_t length = OPTION6_HDR_LEN /*header (4)*/ +
         OPTION6_IA_LEN  /* option content (12) */;
 
     // length of all suboptions
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
index 516b2fc..cab8068 100644
--- a/src/lib/dhcp/option6_ia.h
+++ b/src/lib/dhcp/option6_ia.h
@@ -116,7 +116,7 @@ public:
     /// Returns length of this option, including option header and suboptions
     ///
     /// @return length of this option
-    virtual unsigned short
+    virtual uint16_t
     len();
 
 protected:
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index 4177714..fd3bca4 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -116,9 +116,9 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
     return tmp.str();
 }
 
-unsigned short Option6IAAddr::len() {
+uint16_t Option6IAAddr::len() {
 
-    unsigned short length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+    uint16_t length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
 
     // length of all suboptions
     // TODO implement:
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
index 60c5c48..40e5967 100644
--- a/src/lib/dhcp/option6_iaaddr.h
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -126,8 +126,7 @@ public:
     getValid() const { return valid_; }
 
     /// returns data length (data length + DHCPv4/DHCPv6 option header)
-    virtual unsigned short
-    len();
+    virtual uint16_t len();
 
 protected:
     /// contains an IPv6 address
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index ba07a10..bea93fc 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -47,11 +47,9 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
       yiaddr_(DEFAULT_ADDRESS),
       siaddr_(DEFAULT_ADDRESS),
       giaddr_(DEFAULT_ADDRESS),
-      bufferIn_(NULL, 0), // not used, this is TX packet
       bufferOut_(DHCPV4_PKT_HDR_LEN),
       msg_type_(msg_type)
 {
-    /// TODO: fixed fields, uncomment in ticket #1224
     memset(chaddr_, 0, MAX_CHADDR_LEN);
     memset(sname_, 0, MAX_SNAME_LEN);
     memset(file_, 0, MAX_FILE_LEN);
@@ -64,7 +62,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
       ifindex_(-1),
       local_port_(DHCP4_SERVER_PORT),
       remote_port_(DHCP4_CLIENT_PORT),
-      /// TODO Fixed fields, uncomment in ticket #1224
       op_(BOOTREQUEST),
       transid_(0),
       secs_(0),
@@ -73,7 +70,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
       yiaddr_(DEFAULT_ADDRESS),
       siaddr_(DEFAULT_ADDRESS),
       giaddr_(DEFAULT_ADDRESS),
-      bufferIn_(data, len),
       bufferOut_(0), // not used, this is RX packet
       msg_type_(DHCPDISCOVER)
 {
@@ -82,6 +78,9 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
                   << " received, at least " << DHCPV4_PKT_HDR_LEN
                   << "is expected");
     }
+
+    data_.resize(len);
+    memcpy(&data_[0], data, len);
 }
 
 size_t
@@ -117,35 +116,43 @@ Pkt4::pack() {
 
     LibDHCP::packOptions(bufferOut_, options_);
 
+    // add the END option that indicates the end of options
+    // (the End option is very simple: a single octet with value 255)
+    bufferOut_.writeUint8(DHO_END);
+
     return (true);
 }
 bool
 Pkt4::unpack() {
-    if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+
+    // input buffer (used during message reception)
+    isc::util::InputBuffer bufferIn(&data_[0], data_.size());
+
+    if (bufferIn.getLength()<DHCPV4_PKT_HDR_LEN) {
         isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
-                  << bufferIn_.getLength() << " received, at least "
+                  << bufferIn.getLength() << " received, at least "
                   << DHCPV4_PKT_HDR_LEN << "is expected");
     }
 
-    op_ = bufferIn_.readUint8();
-    htype_ = bufferIn_.readUint8();
-    hlen_ = bufferIn_.readUint8();
-    hops_ = bufferIn_.readUint8();
-    transid_ = bufferIn_.readUint32();
-    secs_ = bufferIn_.readUint16();
-    flags_ = bufferIn_.readUint16();
-    ciaddr_ = IOAddress(bufferIn_.readUint32());
-    yiaddr_ = IOAddress(bufferIn_.readUint32());
-    siaddr_ = IOAddress(bufferIn_.readUint32());
-    giaddr_ = IOAddress(bufferIn_.readUint32());
-    bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
-    bufferIn_.readData(sname_, MAX_SNAME_LEN);
-    bufferIn_.readData(file_, MAX_FILE_LEN);
-
-    size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+    op_ = bufferIn.readUint8();
+    htype_ = bufferIn.readUint8();
+    hlen_ = bufferIn.readUint8();
+    hops_ = bufferIn.readUint8();
+    transid_ = bufferIn.readUint32();
+    secs_ = bufferIn.readUint16();
+    flags_ = bufferIn.readUint16();
+    ciaddr_ = IOAddress(bufferIn.readUint32());
+    yiaddr_ = IOAddress(bufferIn.readUint32());
+    siaddr_ = IOAddress(bufferIn.readUint32());
+    giaddr_ = IOAddress(bufferIn.readUint32());
+    bufferIn.readData(chaddr_, MAX_CHADDR_LEN);
+    bufferIn.readData(sname_, MAX_SNAME_LEN);
+    bufferIn.readData(file_, MAX_FILE_LEN);
+
+    size_t opts_len = bufferIn.getLength() - bufferIn.getPosition();
     vector<uint8_t> optsBuffer;
     // first use of readVector
-    bufferIn_.readVector(optsBuffer, opts_len);
+    bufferIn.readVector(optsBuffer, opts_len);
     LibDHCP::unpackOptions4(optsBuffer, options_);
 
     return (true);
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 8517091..189d95d 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -299,10 +299,21 @@ public:
     ///
     /// @return returns option of requested type (or NULL)
     ///         if no such option is present
-
     boost::shared_ptr<Option>
     getOption(uint8_t opt_type);
 
+
+    /// @brief set interface over which packet should be sent
+    ///
+    /// @param interface defines outbound interface
+    void setIface(const std::string& interface){ iface_ = interface; }
+
+    /// @brief gets interface over which packet was received or
+    ///        will be transmitted
+    ///
+    /// @return name of the interface
+    std::string getIface() const { return iface_; }
+
 protected:
 
     /// converts DHCP message type to BOOTP op type
@@ -385,14 +396,15 @@ protected:
 
     // end of real DHCPv4 fields
 
-    /// input buffer (used during message reception)
-    /// Note that it must be modifiable as hooks can modify incoming buffer),
-    /// thus OutputBuffer, not InputBuffer
-    isc::util::InputBuffer bufferIn_;
-
     /// output buffer (used during message
     isc::util::OutputBuffer bufferOut_;
 
+    /// Data of the input buffer used for an RX packet.  Note that
+    /// InputBuffer does not store the data itself; it only expects the
+    /// data to remain valid for its whole lifetime.  Therefore we need
+    /// to keep the data around.
+    std::vector<uint8_t> data_;
+
     /// message type (e.g. 1=DHCPDISCOVER)
     /// TODO: this will eventually be replaced with DHCP Message Type
     /// option (option 53)
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 01799da..176992f 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -18,6 +18,7 @@ libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option4_addrlst.cc ../option4_addrlst.h option4_addrlst_unittest.cc
 libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
 libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
 libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
new file mode 100644
index 0000000..d4ecf80
--- /dev/null
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -0,0 +1,273 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <util/buffer.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+namespace {
+
+// sample data (a list of 4 addresses)
+const uint8_t sampledata[] = {
+    192, 0, 2, 3,     // 192.0.2.3
+    255, 255, 255, 0, // 255.255.255.0 - popular netmask
+    0, 0, 0, 0,       // 0.0.0.0 - used for default routes or as the 'any' address
+    127, 0, 0, 1      // loopback
+};
+
+// expected on-wire format for an option with 1 address
+const uint8_t expected1[] = { // 1 address
+    DHO_DOMAIN_NAME_SERVERS, 4, // type, length
+    192, 0, 2, 3,     // 192.0.2.3
+};
+
+// expected on-wire format for an option with 4 addresses
+const uint8_t expected4[] = { // 4 addresses
+    254, 16,            // type = 254, len = 16
+    192, 0, 2, 3,       // 192.0.2.3
+    255, 255, 255, 0,   // 255.255.255.0 - popular netmask
+    0, 0, 0, 0,         // 0.0.0.0 - used for default routes or as the 'any' address
+    127, 0, 0, 1        // loopback
+};
+
+class Option4AddrLstTest : public ::testing::Test {
+protected:
+
+    Option4AddrLstTest():
+        vec_(vector<uint8_t>(300,0)) // 300 bytes long filled with 0s
+    {
+        sampleAddrs_.push_back(IOAddress("192.0.2.3"));
+        sampleAddrs_.push_back(IOAddress("255.255.255.0"));
+        sampleAddrs_.push_back(IOAddress("0.0.0.0"));
+        sampleAddrs_.push_back(IOAddress("127.0.0.1"));
+    }
+
+    vector<uint8_t> vec_;
+    Option4AddrLst::AddressContainer sampleAddrs_;
+
+};
+
+TEST_F(Option4AddrLstTest, parse1) {
+
+    memcpy(&vec_[0], sampledata, sizeof(sampledata));
+
+    // just one address
+    Option4AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS,
+                                  vec_.begin(),
+                                  vec_.begin()+4);
+        // use just the first address (4 bytes), not the whole
+        // sampledata
+    );
+
+    EXPECT_EQ(Option::V4, opt1->getUniverse());
+
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt1->getType());
+    EXPECT_EQ(6, opt1->len()); // 2 (header) + 4 (1x IPv4 addr)
+
+    Option4AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    EXPECT_NO_THROW(
+        delete opt1;
+        opt1 = 0;
+    );
+
+    // 1 address
+}
+
+TEST_F(Option4AddrLstTest, parse4) {
+
+    vector<uint8_t> buffer(300,0); // 300 bytes long filled with 0s
+
+    memcpy(&buffer[0], sampledata, sizeof(sampledata));
+
+    // 4 addresses
+    Option4AddrLst* opt4 = 0;
+    EXPECT_NO_THROW(
+        opt4 = new Option4AddrLst(254,
+                                  buffer.begin(),
+                                  buffer.begin()+sizeof(sampledata));
+    );
+
+    EXPECT_EQ(Option::V4, opt4->getUniverse());
+
+    EXPECT_EQ(254, opt4->getType());
+    EXPECT_EQ(18, opt4->len()); // 2 (header) + 16 (4x IPv4 addrs)
+
+    Option4AddrLst::AddressContainer addrs = opt4->getAddresses();
+    ASSERT_EQ(4, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    EXPECT_NO_THROW(
+        delete opt4;
+        opt4 = 0;
+    );
+}
+
+TEST_F(Option4AddrLstTest, assembly1) {
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("192.0.2.3"));
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(6, opt->len());
+    ASSERT_EQ(6, buf.getLength());
+
+    EXPECT_EQ(0, memcmp(expected1, buf.getData(), 6));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is an old-fashioned IPv4 option. We don't serve IPv6 addresses here!
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("2001:db8::1")),
+        BadValue
+    );
+    if (opt) {
+        // Test failed. The exception was not thrown and the option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, assembly4) {
+
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(254, sampleAddrs_);
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(254, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
+    ASSERT_EQ(18, buf.getLength());
+
+    ASSERT_EQ(0, memcmp(expected4, buf.getData(), 18));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is an old-fashioned IPv4 option. We don't serve IPv6 addresses here!
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, sampleAddrs_),
+        BadValue
+    );
+    if (opt) {
+        // Test failed. The exception was not thrown and the option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, setAddress) {
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123, IOAddress("1.2.3.4"));
+    );
+    opt->setAddress(IOAddress("192.0.255.255"));
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.255.255", addrs[0].toText());
+
+    // Only IPv4 addresses should be accepted; IPv6 must be rejected.
+    EXPECT_THROW(
+        opt->setAddress(IOAddress("2001:db8::1")),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+TEST_F(Option4AddrLstTest, setAddresses) {
+
+    Option4AddrLst* opt = 0;
+
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123); // empty list
+    );
+
+    opt->setAddresses(sampleAddrs_);
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    // Only IPv4 addresses should be accepted; IPv6 must be rejected.
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt->setAddresses(sampleAddrs_),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index db3ee3b..66dce8f 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -402,6 +402,8 @@ TEST_F(OptionTest, v6_addgetdel) {
 
     // let's try to delete - should fail
     EXPECT_TRUE(false ==  parent->delOption(2));
+
+    delete parent;
 }
 
 }
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index c89743f..091bfac 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -487,13 +487,15 @@ TEST(Pkt4Test, options) {
 
     const OutputBuffer& buf = pkt->getBuffer();
     // check that all options are stored, they should take sizeof(v4Opts)
-    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+    // there should also be an OPTION_END appended (just one byte)
+    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts) + 1,
               buf.getLength());
 
     // check that this extra data actually contains our options
     const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
     ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
     EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+    EXPECT_EQ(DHO_END, static_cast<uint8_t>(*(ptr + sizeof(v4Opts))));
 
     EXPECT_NO_THROW(
         delete pkt;
@@ -559,4 +561,17 @@ TEST(Pkt4Test, unpackOptions) {
     EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
 }
 
+// This test verifies methods that are used for manipulating meta fields
+// i.e. fields that are not part of the DHCPv4 wire format (e.g. interface name).
+TEST(Pkt4Test, metaFields) {
+    Pkt4 pkt(DHCPDISCOVER, 1234);
+
+    pkt.setIface("lo0");
+
+    EXPECT_EQ("lo0", pkt.getIface());
+
+    /// TODO: Expand this test once additional getters/setters are
+    /// implemented.
+}
+
 } // end of anonymous namespace
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 0d2bffd..5b93f75 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -84,6 +84,8 @@ BUILT_SOURCES += rdataclass.h rdataclass.cc
 
 lib_LTLIBRARIES = libdns++.la
 
+libdns___la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libdns___la_SOURCES =
 libdns___la_SOURCES += edns.h edns.cc
 libdns___la_SOURCES += exceptions.h exceptions.cc
@@ -102,6 +104,7 @@ libdns___la_SOURCES += rrsetlist.h rrsetlist.cc
 libdns___la_SOURCES += rrttl.h rrttl.cc
 libdns___la_SOURCES += rrtype.cc
 libdns___la_SOURCES += question.h question.cc
+libdns___la_SOURCES += serial.h serial.cc
 libdns___la_SOURCES += tsig.h tsig.cc
 libdns___la_SOURCES += tsigerror.h tsigerror.cc
 libdns___la_SOURCES += tsigkey.h tsigkey.cc
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 3b89358..dd14991 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -12,6 +12,7 @@ libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
 libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
 libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
 libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += serial_python.cc serial_python.h
 libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
 libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
 libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 0a7d8e5..212141c 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -49,6 +49,7 @@
 #include "rrset_python.h"
 #include "rrttl_python.h"
 #include "rrtype_python.h"
+#include "serial_python.h"
 #include "tsigerror_python.h"
 #include "tsigkey_python.h"
 #include "tsig_python.h"
@@ -492,6 +493,18 @@ initModulePart_RRType(PyObject* mod) {
 }
 
 bool
+initModulePart_Serial(PyObject* mod) {
+    if (PyType_Ready(&serial_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&serial_type);
+    PyModule_AddObject(mod, "Serial",
+                       reinterpret_cast<PyObject*>(&serial_type));
+
+    return (true);
+}
+
+bool
 initModulePart_TSIGError(PyObject* mod) {
     if (PyType_Ready(&tsigerror_type) < 0) {
         return (false);
@@ -804,6 +817,10 @@ PyInit_pydnspp(void) {
         return (NULL);
     }
 
+    if (!initModulePart_Serial(mod)) {
+        return (NULL);
+    }
+
     if (!initModulePart_TSIGKey(mod)) {
         return (NULL);
     }
diff --git a/src/lib/dns/python/rdata_python.cc b/src/lib/dns/python/rdata_python.cc
index 06c0263..e4ff890 100644
--- a/src/lib/dns/python/rdata_python.cc
+++ b/src/lib/dns/python/rdata_python.cc
@@ -16,6 +16,7 @@
 #include <Python.h>
 #include <dns/rdata.h>
 #include <dns/messagerenderer.h>
+#include <dns/exceptions.h>
 #include <util/buffer.h>
 #include <util/python/pycppwrapper_util.h>
 
@@ -23,6 +24,7 @@
 #include "rrtype_python.h"
 #include "rrclass_python.h"
 #include "messagerenderer_python.h"
+#include "name_python.h"
 
 using namespace isc::dns;
 using namespace isc::dns::python;
@@ -31,6 +33,27 @@ using namespace isc::util::python;
 using namespace isc::dns::rdata;
 
 namespace {
+
+typedef PyObject* method(PyObject* self, PyObject* args);
+
+// Wrap a method in exception handling, converting C++ exceptions
+// to Python ones. The params and return value are just passed through.
+PyObject*
+exception_wrap(method* method, PyObject* self, PyObject* args) {
+    try {
+        return (method(self, args));
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or whether,
+        // they can be triggered at all. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
+        return (NULL);
+    }
+}
+
 class s_Rdata : public PyObject {
 public:
     isc::dns::rdata::ConstRdataPtr cppobj;
@@ -44,16 +67,16 @@ typedef CPPPyObjectContainer<s_Rdata, Rdata> RdataContainer;
 //
 
 // General creation and destruction
-int Rdata_init(s_Rdata* self, PyObject* args);
-void Rdata_destroy(s_Rdata* self);
+int Rdata_init(PyObject* self, PyObject* args, PyObject*);
+void Rdata_destroy(PyObject* self);
 
 // These are the functions we export
-PyObject* Rdata_toText(s_Rdata* self);
+PyObject* Rdata_toText(PyObject* self, PyObject*);
 // This is a second version of toText, we need one where the argument
 // is a PyObject*, for the str() function in python.
 PyObject* Rdata_str(PyObject* self);
-PyObject* Rdata_toWire(s_Rdata* self, PyObject* args);
-PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
+PyObject* Rdata_toWire(PyObject* self, PyObject* args);
+PyObject* RData_richcmp(PyObject* self, PyObject* other, int op);
 
 // This list contains the actual set of functions we have in
 // python. Each entry has
@@ -62,9 +85,9 @@ PyObject* RData_richcmp(s_Rdata* self, s_Rdata* other, int op);
 // 3. Argument type
 // 4. Documentation
 PyMethodDef Rdata_methods[] = {
-    { "to_text", reinterpret_cast<PyCFunction>(Rdata_toText), METH_NOARGS,
+    { "to_text", Rdata_toText, METH_NOARGS,
       "Returns the string representation" },
-    { "to_wire", reinterpret_cast<PyCFunction>(Rdata_toWire), METH_VARARGS,
+    { "to_wire", Rdata_toWire, METH_VARARGS,
       "Converts the Rdata object to wire format.\n"
       "The argument can be either a MessageRenderer or an object that "
       "implements the sequence interface. If the object is mutable "
@@ -75,58 +98,89 @@ PyMethodDef Rdata_methods[] = {
 };
 
 int
-Rdata_init(s_Rdata* self, PyObject* args) {
+Rdata_init(PyObject* self_p, PyObject* args, PyObject*) {
     PyObject* rrtype;
     PyObject* rrclass;
     const char* s;
     const char* data;
     Py_ssize_t len;
+    s_Rdata* self(static_cast<s_Rdata*>(self_p));
 
-    // Create from string
-    if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
-                                        &rrclass_type, &rrclass,
-                                        &s)) {
-        self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
-                                   PyRRClass_ToRRClass(rrclass), s);
-        return (0);
-    } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
-                                &rrclass_type, &rrclass, &data, &len)) {
-        InputBuffer input_buffer(data, len);
-        self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
-                                   PyRRClass_ToRRClass(rrclass),
-                                   input_buffer, len);
-        return (0);
+    try {
+        // Create from string
+        if (PyArg_ParseTuple(args, "O!O!s", &rrtype_type, &rrtype,
+                             &rrclass_type, &rrclass,
+                             &s)) {
+            self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+                                       PyRRClass_ToRRClass(rrclass), s);
+            return (0);
+        } else if (PyArg_ParseTuple(args, "O!O!y#", &rrtype_type, &rrtype,
+                                    &rrclass_type, &rrclass, &data, &len)) {
+            InputBuffer input_buffer(data, len);
+            self->cppobj = createRdata(PyRRType_ToRRType(rrtype),
+                                       PyRRClass_ToRRClass(rrclass),
+                                       input_buffer, len);
+            return (0);
+        }
+    } catch (const isc::dns::rdata::InvalidRdataText& irdt) {
+        PyErr_SetString(po_InvalidRdataText, irdt.what());
+        return (-1);
+    } catch (const isc::dns::rdata::InvalidRdataLength& irdl) {
+        PyErr_SetString(po_InvalidRdataLength, irdl.what());
+        return (-1);
+    } catch (const isc::dns::rdata::CharStringTooLong& cstl) {
+        PyErr_SetString(po_CharStringTooLong, cstl.what());
+        return (-1);
+    } catch (const isc::dns::DNSMessageFORMERR& dmfe) {
+        PyErr_SetString(po_DNSMessageFORMERR, dmfe.what());
+        return (-1);
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or whether,
+        // they can be triggered at all. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (-1);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
+        return (-1);
     }
 
     return (-1);
 }
 
 void
-Rdata_destroy(s_Rdata* self) {
+Rdata_destroy(PyObject* self) {
     // Clear the shared_ptr so that its reference count is zero
     // before we call tp_free() (there is no direct release())
-    self->cppobj.reset();
+    static_cast<s_Rdata*>(self)->cppobj.reset();
     Py_TYPE(self)->tp_free(self);
 }
 
 PyObject*
-Rdata_toText(s_Rdata* self) {
+Rdata_toText_internal(PyObject* self, PyObject*) {
     // Py_BuildValue makes python objects from native data
-    return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+    return (Py_BuildValue("s", static_cast<const s_Rdata*>(self)->cppobj->
+                          toText().c_str()));
+}
+
+PyObject*
+Rdata_toText(PyObject* self, PyObject* args) {
+    return (exception_wrap(&Rdata_toText_internal, self, args));
 }
 
 PyObject*
 Rdata_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
-                               const_cast<char*>("to_text"),
+                                const_cast<char*>("to_text"),
                                 const_cast<char*>("")));
 }
 
 PyObject*
-Rdata_toWire(s_Rdata* self, PyObject* args) {
+Rdata_toWire_internal(PyObject* self_p, PyObject* args) {
     PyObject* bytes;
     PyObject* mr;
+    const s_Rdata* self(static_cast<const s_Rdata*>(self_p));
 
     if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
         PyObject* bytes_o = bytes;
@@ -134,6 +188,11 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
         OutputBuffer buffer(4);
         self->cppobj->toWire(buffer);
         PyObject* rd_bytes = PyBytes_FromStringAndSize(static_cast<const char*>(buffer.getData()), buffer.getLength());
+        // Make sure exceptions from here are propagated.
+        // The exception is already set, so we just return NULL
+        if (rd_bytes == NULL) {
+            return (NULL);
+        }
         PyObject* result = PySequence_InPlaceConcat(bytes_o, rd_bytes);
         // We need to release the object we temporarily created here
         // to prevent memory leak
@@ -152,45 +211,64 @@ Rdata_toWire(s_Rdata* self, PyObject* args) {
 }
 
 PyObject*
-RData_richcmp(s_Rdata* self, s_Rdata* other, int op) {
-    bool c;
+Rdata_toWire(PyObject* self, PyObject* args) {
+    return (exception_wrap(&Rdata_toWire_internal, self, args));
+}
+
+PyObject*
+RData_richcmp(PyObject* self_p, PyObject* other_p, int op) {
+    try {
+        bool c;
+        const s_Rdata* self(static_cast<const s_Rdata*>(self_p)),
+              * other(static_cast<const s_Rdata*>(other_p));
 
-    // Check for null and if the types match. If different type,
-    // simply return False
-    if (!other || (self->ob_type != other->ob_type)) {
-        Py_RETURN_FALSE;
-    }
+        // Check for null and if the types match. If different type,
+        // simply return False
+        if (!other || (self->ob_type != other->ob_type)) {
+            Py_RETURN_FALSE;
+        }
 
-    switch (op) {
-    case Py_LT:
-        c = self->cppobj->compare(*other->cppobj) < 0;
-        break;
-    case Py_LE:
-        c = self->cppobj->compare(*other->cppobj) < 0 ||
-            self->cppobj->compare(*other->cppobj) == 0;
-        break;
-    case Py_EQ:
-        c = self->cppobj->compare(*other->cppobj) == 0;
-        break;
-    case Py_NE:
-        c = self->cppobj->compare(*other->cppobj) != 0;
-        break;
-    case Py_GT:
-        c = self->cppobj->compare(*other->cppobj) > 0;
-        break;
-    case Py_GE:
-        c = self->cppobj->compare(*other->cppobj) > 0 ||
-            self->cppobj->compare(*other->cppobj) == 0;
-        break;
-    default:
-        PyErr_SetString(PyExc_IndexError,
-                        "Unhandled rich comparison operator");
+        switch (op) {
+            case Py_LT:
+                c = self->cppobj->compare(*other->cppobj) < 0;
+                break;
+            case Py_LE:
+                c = self->cppobj->compare(*other->cppobj) < 0 ||
+                    self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            case Py_EQ:
+                c = self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            case Py_NE:
+                c = self->cppobj->compare(*other->cppobj) != 0;
+                break;
+            case Py_GT:
+                c = self->cppobj->compare(*other->cppobj) > 0;
+                break;
+            case Py_GE:
+                c = self->cppobj->compare(*other->cppobj) > 0 ||
+                    self->cppobj->compare(*other->cppobj) == 0;
+                break;
+            default:
+                PyErr_SetString(PyExc_IndexError,
+                                "Unhandled rich comparison operator");
+                return (NULL);
+        }
+        if (c) {
+            Py_RETURN_TRUE;
+        } else {
+            Py_RETURN_FALSE;
+        }
+    } catch (const std::exception& ex) {
+        // FIXME: These exceptions are not tested; I don't know how, or whether,
+        // they can be triggered at all. But they are caught just in case.
+        PyErr_SetString(PyExc_Exception, (std::string("Unknown exception: ") +
+                        ex.what()).c_str());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(PyExc_Exception, "Unknown exception");
         return (NULL);
     }
-    if (c)
-        Py_RETURN_TRUE;
-    else
-        Py_RETURN_FALSE;
 }
 
 } // end of unnamed namespace
@@ -217,7 +295,7 @@ PyTypeObject rdata_type = {
     "pydnspp.Rdata",
     sizeof(s_Rdata),                    // tp_basicsize
     0,                                  // tp_itemsize
-    (destructor)Rdata_destroy,          // tp_dealloc
+    Rdata_destroy,                      // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -237,7 +315,7 @@ PyTypeObject rdata_type = {
     "a set of common interfaces to manipulate concrete RDATA objects.",
     NULL,                               // tp_traverse
     NULL,                               // tp_clear
-    (richcmpfunc)RData_richcmp,         // tp_richcompare
+    RData_richcmp,                      // tp_richcompare
     0,                                  // tp_weaklistoffset
     NULL,                               // tp_iter
     NULL,                               // tp_iternext
@@ -249,7 +327,7 @@ PyTypeObject rdata_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    (initproc)Rdata_init,               // tp_init
+    Rdata_init,                         // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
diff --git a/src/lib/dns/python/rrset_python.cc b/src/lib/dns/python/rrset_python.cc
index 73a19e7..77d520b 100644
--- a/src/lib/dns/python/rrset_python.cc
+++ b/src/lib/dns/python/rrset_python.cc
@@ -52,51 +52,51 @@ public:
 int RRset_init(s_RRset* self, PyObject* args);
 void RRset_destroy(s_RRset* self);
 
-PyObject* RRset_getRdataCount(s_RRset* self);
-PyObject* RRset_getName(s_RRset* self);
-PyObject* RRset_getClass(s_RRset* self);
-PyObject* RRset_getType(s_RRset* self);
-PyObject* RRset_getTTL(s_RRset* self);
-PyObject* RRset_setName(s_RRset* self, PyObject* args);
-PyObject* RRset_setTTL(s_RRset* self, PyObject* args);
-PyObject* RRset_toText(s_RRset* self);
+PyObject* RRset_getRdataCount(PyObject* self, PyObject* args);
+PyObject* RRset_getName(PyObject* self, PyObject* args);
+PyObject* RRset_getClass(PyObject* self, PyObject* args);
+PyObject* RRset_getType(PyObject* self, PyObject* args);
+PyObject* RRset_getTTL(PyObject* self, PyObject* args);
+PyObject* RRset_setName(PyObject* self, PyObject* args);
+PyObject* RRset_setTTL(PyObject* self, PyObject* args);
+PyObject* RRset_toText(PyObject* self, PyObject* args);
 PyObject* RRset_str(PyObject* self);
-PyObject* RRset_toWire(s_RRset* self, PyObject* args);
-PyObject* RRset_addRdata(s_RRset* self, PyObject* args);
-PyObject* RRset_getRdata(PyObject* po_self, PyObject*);
-PyObject* RRset_removeRRsig(s_RRset* self);
+PyObject* RRset_toWire(PyObject* self, PyObject* args);
+PyObject* RRset_addRdata(PyObject* self, PyObject* args);
+PyObject* RRset_getRdata(PyObject* po_self, PyObject* args);
+PyObject* RRset_removeRRsig(PyObject* self, PyObject* args);
 
 // TODO: iterator?
 
 PyMethodDef RRset_methods[] = {
-    { "get_rdata_count", reinterpret_cast<PyCFunction>(RRset_getRdataCount), METH_NOARGS,
+    { "get_rdata_count", RRset_getRdataCount, METH_NOARGS,
       "Returns the number of rdata fields." },
-    { "get_name", reinterpret_cast<PyCFunction>(RRset_getName), METH_NOARGS,
+    { "get_name", RRset_getName, METH_NOARGS,
       "Returns the name of the RRset, as a Name object." },
-    { "get_class", reinterpret_cast<PyCFunction>(RRset_getClass), METH_NOARGS,
+    { "get_class", RRset_getClass, METH_NOARGS,
       "Returns the class of the RRset as an RRClass object." },
-    { "get_type", reinterpret_cast<PyCFunction>(RRset_getType), METH_NOARGS,
+    { "get_type", RRset_getType, METH_NOARGS,
       "Returns the type of the RRset as an RRType object." },
-    { "get_ttl", reinterpret_cast<PyCFunction>(RRset_getTTL), METH_NOARGS,
+    { "get_ttl", RRset_getTTL, METH_NOARGS,
       "Returns the TTL of the RRset as an RRTTL object." },
-    { "set_name", reinterpret_cast<PyCFunction>(RRset_setName), METH_VARARGS,
+    { "set_name", RRset_setName, METH_VARARGS,
       "Sets the name of the RRset.\nTakes a Name object as an argument." },
-    { "set_ttl", reinterpret_cast<PyCFunction>(RRset_setTTL), METH_VARARGS,
+    { "set_ttl", RRset_setTTL, METH_VARARGS,
       "Sets the TTL of the RRset.\nTakes an RRTTL object as an argument." },
-    { "to_text", reinterpret_cast<PyCFunction>(RRset_toText), METH_NOARGS,
+    { "to_text", RRset_toText, METH_NOARGS,
       "Returns the text representation of the RRset as a string" },
-    { "to_wire", reinterpret_cast<PyCFunction>(RRset_toWire), METH_VARARGS,
+    { "to_wire", RRset_toWire, METH_VARARGS,
       "Converts the RRset object to wire format.\n"
       "The argument can be either a MessageRenderer or an object that "
       "implements the sequence interface. If the object is mutable "
       "(for instance a bytearray()), the wire data is added in-place.\n"
       "If it is not (for instance a bytes() object), a new object is "
       "returned" },
-    { "add_rdata", reinterpret_cast<PyCFunction>(RRset_addRdata), METH_VARARGS,
+    { "add_rdata", RRset_addRdata, METH_VARARGS,
       "Adds the rdata for one RR to the RRset.\nTakes an Rdata object as an argument" },
     { "get_rdata", RRset_getRdata, METH_NOARGS,
       "Returns a List containing all Rdata elements" },
-    { "remove_rrsig", reinterpret_cast<PyCFunction>(RRset_removeRRsig), METH_NOARGS,
+    { "remove_rrsig", RRset_removeRRsig, METH_NOARGS,
       "Clears the list of RRsigs for this RRset" },
     { NULL, NULL, 0, NULL }
 };
@@ -133,14 +133,16 @@ RRset_destroy(s_RRset* self) {
 }
 
 PyObject*
-RRset_getRdataCount(s_RRset* self) {
-    return (Py_BuildValue("I", self->cppobj->getRdataCount()));
+RRset_getRdataCount(PyObject* self, PyObject*) {
+    return (Py_BuildValue("I", static_cast<const s_RRset*>(self)->cppobj->
+                          getRdataCount()));
 }
 
 PyObject*
-RRset_getName(s_RRset* self) {
+RRset_getName(PyObject* self, PyObject*) {
     try {
-        return (createNameObject(self->cppobj->getName()));
+        return (createNameObject(static_cast<const s_RRset*>(self)->cppobj->
+                                 getName()));
     } catch (const exception& ex) {
         const string ex_what =
             "Unexpected failure getting rrset Name: " +
@@ -154,9 +156,10 @@ RRset_getName(s_RRset* self) {
 }
 
 PyObject*
-RRset_getClass(s_RRset* self) {
+RRset_getClass(PyObject* self, PyObject*) {
     try {
-        return (createRRClassObject(self->cppobj->getClass()));
+        return (createRRClassObject(static_cast<const s_RRset*>(self)->cppobj->
+                                    getClass()));
     } catch (const exception& ex) {
         const string ex_what =
             "Unexpected failure getting question RRClass: " +
@@ -170,9 +173,10 @@ RRset_getClass(s_RRset* self) {
 }
 
 PyObject*
-RRset_getType(s_RRset* self) {
+RRset_getType(PyObject* self, PyObject*) {
     try {
-        return (createRRTypeObject(self->cppobj->getType()));
+        return (createRRTypeObject(static_cast<const s_RRset*>(self)->cppobj->
+                                   getType()));
     } catch (const exception& ex) {
         const string ex_what =
             "Unexpected failure getting question RRType: " +
@@ -186,9 +190,10 @@ RRset_getType(s_RRset* self) {
 }
 
 PyObject*
-RRset_getTTL(s_RRset* self) {
+RRset_getTTL(PyObject* self, PyObject*) {
     try {
-        return (createRRTTLObject(self->cppobj->getTTL()));
+        return (createRRTTLObject(static_cast<const s_RRset*>(self)->cppobj->
+                                  getTTL()));
     } catch (const exception& ex) {
         const string ex_what =
             "Unexpected failure getting question TTL: " +
@@ -202,29 +207,30 @@ RRset_getTTL(s_RRset* self) {
 }
 
 PyObject*
-RRset_setName(s_RRset* self, PyObject* args) {
+RRset_setName(PyObject* self, PyObject* args) {
     PyObject* name;
     if (!PyArg_ParseTuple(args, "O!", &name_type, &name)) {
         return (NULL);
     }
-    self->cppobj->setName(PyName_ToName(name));
+    static_cast<s_RRset*>(self)->cppobj->setName(PyName_ToName(name));
     Py_RETURN_NONE;
 }
 
 PyObject*
-RRset_setTTL(s_RRset* self, PyObject* args) {
+RRset_setTTL(PyObject* self, PyObject* args) {
     PyObject* rrttl;
     if (!PyArg_ParseTuple(args, "O!", &rrttl_type, &rrttl)) {
         return (NULL);
     }
-    self->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
+    static_cast<s_RRset*>(self)->cppobj->setTTL(PyRRTTL_ToRRTTL(rrttl));
     Py_RETURN_NONE;
 }
 
 PyObject*
-RRset_toText(s_RRset* self) {
+RRset_toText(PyObject* self, PyObject*) {
     try {
-        return (Py_BuildValue("s", self->cppobj->toText().c_str()));
+        return (Py_BuildValue("s", static_cast<const s_RRset*>(self)->cppobj->
+                              toText().c_str()));
     } catch (const EmptyRRset& ers) {
         PyErr_SetString(po_EmptyRRset, ers.what());
         return (NULL);
@@ -235,14 +241,15 @@ PyObject*
 RRset_str(PyObject* self) {
     // Simply call the to_text method we already defined
     return (PyObject_CallMethod(self,
-                               const_cast<char*>("to_text"),
+                                const_cast<char*>("to_text"),
                                 const_cast<char*>("")));
 }
 
 PyObject*
-RRset_toWire(s_RRset* self, PyObject* args) {
+RRset_toWire(PyObject* self_p, PyObject* args) {
     PyObject* bytes;
     PyObject* mr;
+    const s_RRset* self(static_cast<const s_RRset*>(self_p));
 
     try {
         if (PyArg_ParseTuple(args, "O", &bytes) && PySequence_Check(bytes)) {
@@ -274,13 +281,13 @@ RRset_toWire(s_RRset* self, PyObject* args) {
 }
 
 PyObject*
-RRset_addRdata(s_RRset* self, PyObject* args) {
+RRset_addRdata(PyObject* self, PyObject* args) {
     PyObject* rdata;
     if (!PyArg_ParseTuple(args, "O!", &rdata_type, &rdata)) {
         return (NULL);
     }
     try {
-        self->cppobj->addRdata(PyRdata_ToRdata(rdata));
+        static_cast<s_RRset*>(self)->cppobj->addRdata(PyRdata_ToRdata(rdata));
         Py_RETURN_NONE;
     } catch (const std::bad_cast&) {
         PyErr_Clear();
@@ -324,8 +331,8 @@ RRset_getRdata(PyObject* po_self, PyObject*) {
 }
 
 PyObject*
-RRset_removeRRsig(s_RRset* self) {
-    self->cppobj->removeRRsig();
+RRset_removeRRsig(PyObject* self, PyObject*) {
+    static_cast<s_RRset*>(self)->cppobj->removeRRsig();
     Py_RETURN_NONE;
 }
 
diff --git a/src/lib/dns/python/serial_python.cc b/src/lib/dns/python/serial_python.cc
new file mode 100644
index 0000000..e2bd809
--- /dev/null
+++ b/src/lib/dns/python/serial_python.cc
@@ -0,0 +1,281 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <dns/serial.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "serial_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_Serial : public PyObject {
+public:
+    s_Serial() : cppobj(NULL) {};
+    isc::dns::Serial* cppobj;
+};
+
+typedef CPPPyObjectContainer<s_Serial, Serial> SerialContainer;
+
+PyObject* Serial_str(PyObject* self);
+PyObject* Serial_getValue(s_Serial* self);
+PyObject* Serial_richcmp(s_Serial* self, s_Serial* other, int op);
+PyObject* Serial_add(PyObject *right, PyObject *left);
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef Serial_methods[] = {
+    { "get_value", reinterpret_cast<PyCFunction>(Serial_getValue), METH_NOARGS,
+      "Returns the Serial value as an integer" },
+    { NULL, NULL, 0, NULL }
+};
+
+// For overriding the + operator. We do not define any other operators for
+// this type.
+PyNumberMethods Serial_NumberMethods = {
+    Serial_add, //nb_add;
+    NULL, //nb_subtract;
+    NULL, //nb_multiply;
+    NULL, //nb_remainder;
+    NULL, //nb_divmod;
+    NULL, //nb_power;
+    NULL, //nb_negative;
+    NULL, //nb_positive;
+    NULL, //nb_absolute;
+    NULL, //nb_bool;
+    NULL, //nb_invert;
+    NULL, //nb_lshift;
+    NULL, //nb_rshift;
+    NULL, //nb_and;
+    NULL, //nb_xor;
+    NULL, //nb_or;
+    NULL, //nb_int;
+    NULL, //nb_reserved;
+    NULL, //nb_float;
+
+    NULL, //nb_inplace_add;
+    NULL, //nb_inplace_subtract;
+    NULL, //nb_inplace_multiply;
+    NULL, //nb_inplace_remainder;
+    NULL, //nb_inplace_power;
+    NULL, //nb_inplace_lshift;
+    NULL, //nb_inplace_rshift;
+    NULL, //nb_inplace_and;
+    NULL, //nb_inplace_xor;
+    NULL, //nb_inplace_or;
+
+    NULL, //nb_floor_divide;
+    NULL, //nb_true_divide;
+    NULL, //nb_inplace_floor_divide;
+    NULL, //nb_inplace_true_divide;
+
+    NULL, //nb_index;
+};
+
+int
+Serial_init(s_Serial* self, PyObject* args) {
+    long long i;
+    if (PyArg_ParseTuple(args, "L", &i)) {
+        PyErr_Clear();
+        if (i < 0 || i > 0xffffffff) {
+            PyErr_SetString(PyExc_ValueError, "Serial number out of range");
+            return (-1);
+        }
+        self->cppobj = new Serial(i);
+        return (0);
+    } else {
+        return (-1);
+    }
+}
+
+void
+Serial_destroy(s_Serial* self) {
+    delete self->cppobj;
+    self->cppobj = NULL;
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+Serial_getValue(s_Serial* self) {
+    return (Py_BuildValue("I", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_str(PyObject* po_self) {
+    const s_Serial* const self = static_cast<s_Serial*>(po_self);
+    return (PyUnicode_FromFormat("%u", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_richcmp(s_Serial* self, s_Serial* other, int op) {
+    bool c = false;
+
+    // Check for null and if the types match. If different type,
+    // simply return False
+    if (!other || (self->ob_type != other->ob_type)) {
+        Py_RETURN_FALSE;
+    }
+
+    switch (op) {
+    case Py_LT:
+        c = *self->cppobj < *other->cppobj;
+        break;
+    case Py_LE:
+        c = *self->cppobj <= *other->cppobj;
+        break;
+    case Py_EQ:
+        c = *self->cppobj == *other->cppobj;
+        break;
+    case Py_NE:
+        c = *self->cppobj != *other->cppobj;
+        break;
+    case Py_GT:
+        c = *self->cppobj > *other->cppobj;
+        break;
+    case Py_GE:
+        c = *self->cppobj >= *other->cppobj;
+        break;
+    }
+    if (c) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+PyObject *
+Serial_add(PyObject *left, PyObject *right) {
+    // Either operand can be a Serial or a long, as long as one of them is a
+    // Serial.
+    if (PySerial_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PySerial_ToSerial(right)));
+    } else if (PySerial_Check(left) && PyLong_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PyLong_AsLong(right)));
+    } else if (PyLong_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(right) +
+                                   PyLong_AsLong(left)));
+    } else {
+        Py_INCREF(Py_NotImplemented);
+        return Py_NotImplemented;
+    }
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Serial
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject serial_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Serial",
+    sizeof(s_Serial),                   // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Serial_destroy,         // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    &Serial_NumberMethods,              // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Serial_str,                         // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Serial class encapsulates Serials used in DNS SOA records.\n\n"
+    "This is a straightforward class; an Serial object simply maintains a "
+    "32-bit unsigned integer corresponding to the SOA SERIAL value.  The "
+    "main purpose of this class is to provide serial number arithmetic, as "
+    "described in RFC 1892. Objects of this type can be compared and added "
+    "to each other, as described in RFC 1892. Apart from str(), get_value(), "
+    "comparison operators, and the + operator, no other operations are "
+    "defined for this type.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)Serial_richcmp,        // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Serial_methods,                     // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Serial_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createSerialObject(const Serial& source) {
+    SerialContainer container(PyObject_New(s_Serial, &serial_type));
+    container.set(new Serial(source));
+    return (container.release());
+}
+
+bool
+PySerial_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &serial_type));
+}
+
+const Serial&
+PySerial_ToSerial(const PyObject* serial_obj) {
+    if (serial_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial PyObject conversion");
+    }
+    const s_Serial* serial = static_cast<const s_Serial*>(serial_obj);
+    return (*serial->cppobj);
+}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
diff --git a/src/lib/dns/python/serial_python.h b/src/lib/dns/python/serial_python.h
new file mode 100644
index 0000000..48b5199
--- /dev/null
+++ b/src/lib/dns/python/serial_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SERIAL_H
+#define __PYTHON_SERIAL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Serial;
+
+namespace python {
+
+extern PyTypeObject serial_type;
+
+/// This is a simple shortcut to create a python Serial object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block,
+/// followed by the necessary setup of a Python exception if it throws.
+PyObject* createSerialObject(const Serial& source);
+
+/// \brief Checks if the given python object is a Serial object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Serial, false otherwise
+bool PySerial_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Serial object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Serial; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PySerial_Check()
+///
+/// \note This is not a copy; if the Serial is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param Serial_obj The Serial object to convert
+const Serial& PySerial_ToSerial(const PyObject* Serial_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_SERIAL_H
+
+// Local Variables:
+// mode: c++
+// End:
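
As noted in the header above, createSerialObject() is meant to be called inside a try
block with Python exception handling set up by the caller. A hypothetical sketch of that
pattern, mirroring the other pydnspp wrappers (the function name and value are illustrative):

    #include <Python.h>
    #include <exception>
    #include <dns/serial.h>
    #include "serial_python.h"

    // Convert a C++ Serial into a Python Serial object, translating any
    // C++ exception into a Python exception as the header requires.
    PyObject*
    Example_getSerial(PyObject*, PyObject*) {
        try {
            const isc::dns::Serial serial(42);   // illustrative value
            return (isc::dns::python::createSerialObject(serial));
        } catch (const std::exception& ex) {
            PyErr_SetString(PyExc_Exception, ex.what());
            return (NULL);
        } catch (...) {
            PyErr_SetString(PyExc_Exception, "Unknown exception");
            return (NULL);
        }
    }
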
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index d1273f3..3338727 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -11,6 +11,7 @@ PYTESTS += rrclass_python_test.py
 PYTESTS += rrset_python_test.py
 PYTESTS += rrttl_python_test.py
 PYTESTS += rrtype_python_test.py
+PYTESTS += serial_python_test.py
 PYTESTS += tsig_python_test.py
 PYTESTS += tsig_rdata_python_test.py
 PYTESTS += tsigerror_python_test.py
diff --git a/src/lib/dns/python/tests/rdata_python_test.py b/src/lib/dns/python/tests/rdata_python_test.py
index 776f792..81dea5f 100644
--- a/src/lib/dns/python/tests/rdata_python_test.py
+++ b/src/lib/dns/python/tests/rdata_python_test.py
@@ -35,6 +35,14 @@ class RdataTest(unittest.TestCase):
         self.assertRaises(TypeError, Rdata, "wrong", RRClass("IN"), "192.0.2.99")
         self.assertRaises(TypeError, Rdata, RRType("A"), "wrong", "192.0.2.99")
         self.assertRaises(TypeError, Rdata, RRType("A"), RRClass("IN"), 1)
+        self.assertRaises(InvalidRdataText, Rdata, RRType("A"), RRClass("IN"),
+                          "Invalid Rdata Text")
+        self.assertRaises(CharStringTooLong, Rdata, RRType("TXT"),
+                          RRClass("IN"), ' ' * 256)
+        self.assertRaises(InvalidRdataLength, Rdata, RRType("TXT"),
+                          RRClass("IN"), bytes(65536))
+        self.assertRaises(DNSMessageFORMERR, Rdata, RRType("TXT"),
+                          RRClass("IN"), b"\xff")
 
     def test_rdata_to_wire(self):
         b = bytearray()
diff --git a/src/lib/dns/python/tests/serial_python_test.py b/src/lib/dns/python/tests/serial_python_test.py
new file mode 100644
index 0000000..0ca08c2
--- /dev/null
+++ b/src/lib/dns/python/tests/serial_python_test.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Tests for the Serial part of the pydnspp module
+#
+
+import unittest
+import os
+from pydnspp import *
+
+class SerialTest(unittest.TestCase):
+    def setUp(self):
+        self.one = Serial(1)
+        self.one_2 = Serial(1)
+        self.two = Serial(2)
+        self.date_zero = Serial(1980120100)
+        self.date_one = Serial(1980120101)
+        self.zero = Serial(0)
+        self.highest = Serial(4294967295)
+        self.number_low = Serial(12345)
+        self.number_medium = Serial(2000000000)
+        self.number_high = Serial(4000000000)
+
+    def test_init(self):
+        self.assertRaises(ValueError, Serial, -1)
+        self.assertRaises(ValueError, Serial, 4294967296)
+        self.assertRaises(ValueError, Serial, 4294967297)
+        self.assertRaises(ValueError, Serial, 100000000000)
+
+    def test_get_value(self):
+        self.assertEqual(1, self.one.get_value())
+        self.assertNotEqual(2, self.one_2.get_value())
+        self.assertEqual(2, self.two.get_value())
+        self.assertEqual(1980120100, self.date_zero.get_value())
+        self.assertEqual(1980120101, self.date_one.get_value())
+        self.assertEqual(0, self.zero.get_value())
+        self.assertEqual(4294967295, self.highest.get_value())
+        self.assertEqual(12345, self.number_low.get_value())
+        self.assertEqual(2000000000, self.number_medium.get_value())
+        self.assertEqual(4000000000, self.number_high.get_value())
+
+    def test_str(self):
+        self.assertEqual('1', str(self.one))
+        self.assertNotEqual('2', str(self.one_2))
+        self.assertEqual('2', str(self.two))
+        self.assertEqual('1980120100', str(self.date_zero))
+        self.assertEqual('1980120101', str(self.date_one))
+        self.assertEqual('0', str(self.zero))
+        self.assertEqual('4294967295', str(self.highest))
+        self.assertEqual('12345', str(self.number_low))
+        self.assertEqual('2000000000', str(self.number_medium))
+        self.assertEqual('4000000000', str(self.number_high))
+
+    def test_equals(self):
+        self.assertEqual(self.one, self.one)
+        self.assertEqual(self.one, self.one_2)
+        self.assertNotEqual(self.one, self.two)
+        self.assertNotEqual(self.two, self.one)
+        self.assertEqual(Serial(12345), self.number_low)
+        self.assertNotEqual(Serial(12346), self.number_low)
+
+    def test_compare(self):
+        # These should be true/false even without serial arithmetic
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.one_2)
+        self.assertLess(self.one, self.two)
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.two)
+        self.assertGreater(self.two, self.one)
+        self.assertGreaterEqual(self.two, self.two)
+        self.assertGreaterEqual(self.two, self.one)
+        self.assertLess(self.one, self.number_low)
+        self.assertLess(self.number_low, self.number_medium)
+        self.assertLess(self.number_medium, self.number_high)
+
+        # These should 'wrap'
+        self.assertGreater(self.zero, self.highest)
+        self.assertLess(self.highest, self.one)
+        self.assertLess(self.number_high, self.number_low)
+
+    def test_addition(self):
+        self.assertEqual(self.two, self.one + self.one)
+        self.assertEqual(self.two, self.one + self.one_2)
+        self.assertEqual(self.highest, self.highest + self.zero)
+        self.assertEqual(self.zero, self.highest + self.one)
+        self.assertEqual(self.one, self.highest + self.two)
+        self.assertEqual(self.one, self.highest + self.one + self.one)
+        self.assertEqual(self.one + 100, self.highest + 102)
+        self.assertEqual(100 + self.one, self.highest + 102)
+        self.assertEqual(self.zero + 2147483645, self.highest + 2147483646)
+
+        # using lambda so the error doesn't get thrown on initial evaluation
+        self.assertRaises(TypeError, lambda: self.zero + "bad")
+        self.assertRaises(TypeError, lambda: self.zero + None)
+        self.assertRaises(TypeError, lambda: "bad" + self.zero)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 875a957..e473bca 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,10 +106,10 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
     renderer.writeData(numdata_, sizeof(numdata_));
 }
 
-uint32_t
+Serial
 SOA::getSerial() const {
     InputBuffer b(numdata_, sizeof(numdata_));
-    return (b.readUint32());
+    return (Serial(b.readUint32()));
 }
 
 string
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 4c6b6ec..2c180b2 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -18,6 +18,7 @@
 
 #include <dns/name.h>
 #include <dns/rdata.h>
+#include <dns/serial.h>
 
 // BEGIN_ISC_NAMESPACE
 
@@ -35,7 +36,7 @@ public:
         uint32_t refresh, uint32_t retry, uint32_t expire,
         uint32_t minimum);
     /// \brief Returns the serial stored in the SOA.
-    uint32_t getSerial() const;
+    Serial getSerial() const;
 private:
     /// Note: this is a prototype version; we may reconsider
     /// this representation later.
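
With getSerial() now returning Serial rather than uint32_t, callers get RFC 1982
comparison semantics automatically. An illustrative sketch, not part of the patch,
assuming the generated rdataclass.h header for generic::SOA:

    #include <dns/rdataclass.h>
    #include <dns/serial.h>

    // Decide whether a received SOA carries a newer serial than the stored
    // one, using serial arithmetic instead of plain integer comparison.
    bool isNewerSerial(const isc::dns::rdata::generic::SOA& stored,
                       const isc::dns::rdata::generic::SOA& received) {
        return (stored.getSerial() < received.getSerial());
    }
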
diff --git a/src/lib/dns/serial.cc b/src/lib/dns/serial.cc
new file mode 100644
index 0000000..90bc242
--- /dev/null
+++ b/src/lib/dns/serial.cc
@@ -0,0 +1,76 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/serial.h>
+
+namespace isc {
+namespace dns {
+
+bool
+Serial::operator==(const Serial& other) const {
+    return (value_ == other.getValue());
+}
+
+bool
+Serial::operator!=(const Serial& other) const {
+    return (value_ != other.getValue());
+}
+
+bool
+Serial::operator<(const Serial& other) const {
+    uint32_t other_val = other.getValue();
+    bool result = false;
+    if (value_ < other_val) {
+        result = ((other_val - value_) <= MAX_SERIAL_INCREMENT);
+    } else if (other_val < value_) {
+        result = ((value_ - other_val) > MAX_SERIAL_INCREMENT);
+    }
+    return (result);
+}
+
+bool
+Serial::operator<=(const Serial& other) const {
+    return (operator==(other) || operator<(other));
+}
+
+bool
+Serial::operator>(const Serial& other) const {
+    return (!operator==(other) && !operator<(other));
+}
+
+bool
+Serial::operator>=(const Serial& other) const {
+    return (!operator<(other));
+}
+
+Serial
+Serial::operator+(uint32_t other_val) const {
+    uint64_t new_val = static_cast<uint64_t>(value_) +
+                       static_cast<uint64_t>(other_val);
+    return Serial(static_cast<uint32_t>(new_val % MAX_SERIAL_VALUE));
+}
+
+Serial
+Serial::operator+(const Serial& other) const {
+    return (operator+(other.getValue()));
+}
+
+std::ostream&
+operator<<(std::ostream& os, const Serial& serial) {
+    return (os << serial.getValue());
+}
+
+} // end namespace dns
+} // end namespace isc
+
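
A short worked example of the comparison and wrap-around behaviour implemented above
(values and function name are illustrative only, not part of the patch):

    #include <dns/serial.h>
    #include <cassert>

    void serialArithmeticSketch() {
        using isc::dns::Serial;

        const Serial highest(4294967295u);  // 0xffffffff
        const Serial zero(0);
        const Serial one(1);

        // Plain integer comparison would say 0 < 4294967295, but the wrapped
        // distance from 4294967295 to 0 is only 1, so in serial arithmetic
        // the numerically smaller value is considered greater.
        assert(zero > highest);
        assert(highest < one);

        // Addition wraps modulo 2^32 (MAX_SERIAL_VALUE).
        assert(highest + 2 == one);
        assert(highest + one == zero);
    }
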
diff --git a/src/lib/dns/serial.h b/src/lib/dns/serial.h
new file mode 100644
index 0000000..3549860
--- /dev/null
+++ b/src/lib/dns/serial.h
@@ -0,0 +1,155 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERIAL_H
+#define __SERIAL_H 1
+
+#include <stdint.h>
+#include <iostream>
+
+namespace isc {
+namespace dns {
+
+/// The maximum difference between two serial numbers. If the (plain uint32_t)
+/// difference between two serials is greater than this number, the smaller one
+/// is considered greater.
+const uint32_t MAX_SERIAL_INCREMENT = 2147483647;
+
+/// The modulus (2^32) used by operator+; one more than the maximum value a
+/// serial can have.
+const uint64_t MAX_SERIAL_VALUE = 4294967296ull;
+
+/// \brief This class defines DNS serial numbers and serial arithmetic.
+///
+/// DNS serial numbers are in essence unsigned 32-bit numbers, with one
+/// catch: they should be compared using sequence space arithmetic.
+/// So, given that they are 32 bits wide, as soon as the difference between
+/// two serial numbers is greater than 2147483647 (2^31 - 1), the lower
+/// number (in plain comparison) is considered the higher one.
+///
+/// In order to do this as transparently as possible, these numbers are
+/// stored in the Serial class, which overrides the basic comparison operators.
+///
+/// In this specific context, these operations are called 'serial number
+/// arithmetic', and they are defined in RFC 1982.
+///
+/// \note RFC 1982 defines everything based on the value SERIAL_BITS. Since
+/// the serial number has a fixed length of 32 bits, the values we use are
+/// hard-coded, and not computed based on variable bit lengths.
+class Serial {
+public:
+    /// \brief Constructor with value
+    ///
+    /// \param value The uint32_t value of the serial
+    explicit Serial(uint32_t value) : value_(value) {}
+
+    /// \brief Copy constructor
+    Serial(const Serial& other) : value_(other.getValue()) {}
+
+    /// \brief Direct assignment from other Serial
+    ///
+    /// \param other The Serial to assign the value from
+    void operator=(const Serial& other) { value_ = other.getValue(); }
+
+    /// \brief Direct assignment from value
+    ///
+    /// \param value the uint32_t value to assign
+    void operator=(uint32_t value) { value_ = value; }
+
+    /// \brief Returns the uint32_t representation of this serial value
+    ///
+    /// \return The uint32_t value of this Serial
+    uint32_t getValue() const { return (value_); }
+
+    /// \brief Returns true if the serial values are equal
+    ///
+    /// \return True if the values are equal
+    bool operator==(const Serial& other) const;
+
+    /// \brief Returns true if the serial values are not equal
+    ///
+    /// \return True if the values are not equal
+    bool operator!=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is smaller than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than the given value
+    bool operator<(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// smaller than the other, according to serial arithmetic as described
+    /// in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than or equal to the given value
+    bool operator<=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is greater than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than the given value
+    bool operator>(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// greater than the other, according to serial arithmetic as described in
+    /// RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than or equal to the given value
+    bool operator>=(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other The Serial to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    ///
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other_val The uint32_t value to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(uint32_t other_val) const;
+
+private:
+    uint32_t value_;
+};
+
+/// \brief Helper operator for output streams, writes the value to the stream
+///
+/// \param os The ostream to write to
+/// \param serial The Serial to write
+/// \return the output stream
+std::ostream& operator<<(std::ostream& os, const Serial& serial);
+
+} // end namespace dns
+} // end namespace isc
+
+#endif // __SERIAL_H
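
For illustration only (not part of this commit), the following stand-alone
Python sketch mirrors the rules implemented by Serial::operator< and
Serial::operator+ above and checks the same wrap-around cases as the new
unit tests:

    MAX_SERIAL_INCREMENT = 2147483647   # 2^31 - 1, as in serial.h
    MAX_SERIAL_VALUE = 4294967296       # 2^32, the modulus used by operator+

    def serial_lt(a, b):
        """True if serial a is smaller than serial b per RFC 1982."""
        if a < b:
            return (b - a) <= MAX_SERIAL_INCREMENT
        elif b < a:
            return (a - b) > MAX_SERIAL_INCREMENT
        return False

    def serial_add(a, n):
        """Add n to serial a, wrapping around at 2^32."""
        return (a + n) % MAX_SERIAL_VALUE

    assert serial_lt(1, 2)                    # ordinary comparison
    assert serial_lt(4294967295, 0)           # max < min once the gap wraps
    assert not serial_lt(0, 4294967295)       # ... and min > max
    assert serial_add(4294967295, 2) == 1     # max + two == one
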
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index ceeb3b8..cfd1286 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -54,6 +54,7 @@ run_unittests_SOURCES += question_unittest.cc
 run_unittests_SOURCES += rrparamregistry_unittest.cc
 run_unittests_SOURCES += masterload_unittest.cc
 run_unittests_SOURCES += message_unittest.cc
+run_unittests_SOURCES += serial_unittest.cc
 run_unittests_SOURCES += tsig_unittest.cc
 run_unittests_SOURCES += tsigerror_unittest.cc
 run_unittests_SOURCES += tsigkey_unittest.cc
@@ -61,12 +62,12 @@ run_unittests_SOURCES += tsigrecord_unittest.cc
 run_unittests_SOURCES += character_string_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-# We shouldn't need to include BOTAN_LDFLAGS here, but there
+# We shouldn't need to include BOTAN_LIBS here, but there
 # is one test system where the path for GTEST_LDFLAGS contains
 # an older version of botan, and somehow that version gets
 # linked if we don't
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(BOTAN_LIBS) $(GTEST_LDADD)
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 17498eb..07c24d5 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -76,7 +76,7 @@ TEST_F(Rdata_SOA_Test, toText) {
 }
 
 TEST_F(Rdata_SOA_Test, getSerial) {
-    EXPECT_EQ(2010012601, rdata_soa.getSerial());
+    EXPECT_EQ(2010012601, rdata_soa.getSerial().getValue());
 }
 
 }
diff --git a/src/lib/dns/tests/serial_unittest.cc b/src/lib/dns/tests/serial_unittest.cc
new file mode 100644
index 0000000..e27f628
--- /dev/null
+++ b/src/lib/dns/tests/serial_unittest.cc
@@ -0,0 +1,179 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/serial.h>
+
+using namespace isc::dns;
+
+class SerialTest : public ::testing::Test {
+public:
+    SerialTest() : one(1), one_2(1), two(2),
+                   date_zero(1980120100), date_one(1980120101),
+                   min(0), max(4294967295u),
+                   number_low(12345),
+                   number_medium(2000000000),
+                   number_high(4000000000u)
+    {}
+    Serial one, one_2, two, date_zero, date_one, min, max, number_low, number_medium, number_high;
+};
+
+//
+// Basic tests
+//
+
+TEST_F(SerialTest, get_value) {
+    EXPECT_EQ(1, one.getValue());
+    EXPECT_NE(2, one.getValue());
+    EXPECT_EQ(2, two.getValue());
+    EXPECT_EQ(1980120100, date_zero.getValue());
+    EXPECT_EQ(1980120101, date_one.getValue());
+    EXPECT_EQ(0, min.getValue());
+    EXPECT_EQ(4294967295u, max.getValue());
+    EXPECT_EQ(12345, number_low.getValue());
+    EXPECT_EQ(2000000000, number_medium.getValue());
+    EXPECT_EQ(4000000000u, number_high.getValue());
+}
+
+TEST_F(SerialTest, equals) {
+    EXPECT_EQ(one, one);
+    EXPECT_EQ(one, one_2);
+    EXPECT_NE(one, two);
+    EXPECT_NE(two, one);
+    EXPECT_EQ(Serial(12345), number_low);
+    EXPECT_NE(Serial(12346), number_low);
+}
+
+TEST_F(SerialTest, comparison) {
+    // These should be true/false even without serial arithmetic
+    EXPECT_LE(one, one);
+    EXPECT_LE(one, one_2);
+    EXPECT_LT(one, two);
+    EXPECT_LE(one, two);
+    EXPECT_GE(two, two);
+    EXPECT_GT(two, one);
+    EXPECT_GE(two, one);
+    EXPECT_LT(one, number_low);
+    EXPECT_LT(number_low, number_medium);
+    EXPECT_LT(number_medium, number_high);
+
+    // now let's try some that 'wrap', as it were
+    EXPECT_GT(min, max);
+    EXPECT_LT(max, min);
+    EXPECT_LT(number_high, number_low);
+}
+
+//
+// RFC 1982 Section 3.1
+//
+TEST_F(SerialTest, addition) {
+    EXPECT_EQ(two, one + one);
+    EXPECT_EQ(two, one + one_2);
+    EXPECT_EQ(max, max + min);
+    EXPECT_EQ(min, max + one);
+    EXPECT_EQ(one, max + two);
+    EXPECT_EQ(one, max + one + one);
+
+    EXPECT_EQ(one + 100, max + 102);
+    EXPECT_EQ(min + 2147483645, max + 2147483646);
+    EXPECT_EQ(min + 2147483646, max + MAX_SERIAL_INCREMENT);
+}
+
+//
+// RFC 1982 Section 3.2 has been checked by the basic tests above
+//
+
+//
+// RFC 1982 Section 4.1
+//
+
+// Helper function for addition_always_larger test, add some numbers
+// and check that the result is always larger than the original
+void do_addition_larger_test(const Serial& number) {
+    EXPECT_GE(number + 0, number);
+    EXPECT_EQ(number + 0, number);
+    EXPECT_GT(number + 1, number);
+    EXPECT_GT(number + 2, number);
+    EXPECT_GT(number + 100, number);
+    EXPECT_GT(number + 1111111, number);
+    EXPECT_GT(number + 2147483646, number);
+    EXPECT_GT(number + MAX_SERIAL_INCREMENT, number);
+    // Try MAX_SERIAL_INCREMENT as a hardcoded number as well
+    EXPECT_GT(number + 2147483647, number);
+}
+
+TEST_F(SerialTest, addition_always_larger) {
+    do_addition_larger_test(one);
+    do_addition_larger_test(two);
+    do_addition_larger_test(date_zero);
+    do_addition_larger_test(date_one);
+    do_addition_larger_test(min);
+    do_addition_larger_test(max);
+    do_addition_larger_test(number_low);
+    do_addition_larger_test(number_medium);
+    do_addition_larger_test(number_high);
+}
+
+//
+// RFC 1982 Section 4.2
+//
+
+// Helper function to do the second addition
+void
+do_two_additions_test_second(const Serial &original,
+                             const Serial &number)
+{
+    EXPECT_NE(original, number);
+    EXPECT_NE(original, number + 0);
+    EXPECT_NE(original, number + 1);
+    EXPECT_NE(original, number + 2);
+    EXPECT_NE(original, number + 100);
+    EXPECT_NE(original, number + 1111111);
+    EXPECT_NE(original, number + 2147483646);
+    EXPECT_NE(original, number + MAX_SERIAL_INCREMENT);
+    EXPECT_NE(original, number + 2147483647);
+}
+
+void do_two_additions_test_first(const Serial &number) {
+    do_two_additions_test_second(number, number + 1);
+    do_two_additions_test_second(number, number + 2);
+    do_two_additions_test_second(number, number + 100);
+    do_two_additions_test_second(number, number + 1111111);
+    do_two_additions_test_second(number, number + 2147483646);
+    do_two_additions_test_second(number, number + MAX_SERIAL_INCREMENT);
+    do_two_additions_test_second(number, number + 2147483647);
+}
+
+TEST_F(SerialTest, two_additions_never_equal) {
+    do_two_additions_test_first(one);
+    do_two_additions_test_first(two);
+    do_two_additions_test_first(date_zero);
+    do_two_additions_test_first(date_one);
+    do_two_additions_test_first(min);
+    do_two_additions_test_first(max);
+    do_two_additions_test_first(number_low);
+    do_two_additions_test_first(number_medium);
+    do_two_additions_test_first(number_high);
+}
+
+//
+// RFC 1982 Section 4.3 and 4.4 have nothing to test
+//
+
+//
+// Tests from RFC 1982 examples
+//
+TEST(SerialTextRFCExamples, rfc_example_tests) {
+}
diff --git a/src/lib/exceptions/exceptions.h b/src/lib/exceptions/exceptions.h
index 433bb7d..b68f3c4 100644
--- a/src/lib/exceptions/exceptions.h
+++ b/src/lib/exceptions/exceptions.h
@@ -126,6 +126,17 @@ public:
         isc::Exception(file, line, what) {}
 };
 
+/// \brief A generic exception that is thrown if a function is called
+/// in a prohibited way.
+///
+/// For example, this can happen if a class method is called when the object's
+/// state does not allow that particular method.
+class InvalidOperation : public Exception {
+public:
+    InvalidOperation(const char* file, size_t line, const char* what) :
+        isc::Exception(file, line, what) {}
+};
+
 ///
 /// \brief A generic exception that is thrown when an unexpected
 /// error condition occurs.
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 957d350..286e9fd 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -46,5 +46,4 @@ if USE_CLANGPP
 liblog_la_CXXFLAGS += -Wno-error
 endif
 liblog_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
-liblog_la_LDFLAGS  = $(LOG4CPLUS_LDFLAGS)
-liblog_la_LIBADD   = $(top_builddir)/src/lib/util/libutil.la
+liblog_la_LIBADD   = $(LOG4CPLUS_LIBS) $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index a5f793c..53e97a1 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -48,16 +48,18 @@ endif
 noinst_PROGRAMS = logger_example
 logger_example_SOURCES = logger_example.cc
 logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-logger_example_LDADD  = $(top_builddir)/src/lib/log/liblog.la
+logger_example_LDFLAGS = $(AM_LDFLAGS)
+logger_example_LDADD  = $(LOG4CPLUS_LIBS)
+logger_example_LDADD += $(top_builddir)/src/lib/log/liblog.la
 logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
 logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
 noinst_PROGRAMS += init_logger_test
 init_logger_test_SOURCES = init_logger_test.cc
 init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-init_logger_test_LDADD  = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDFLAGS = $(AM_LDFLAGS)
+init_logger_test_LDADD  = $(LOG4CPLUS_LIBS)
+init_logger_test_LDADD += $(top_builddir)/src/lib/log/liblog.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
diff --git a/src/lib/nsas/nameserver_entry.cc b/src/lib/nsas/nameserver_entry.cc
index 553c35d..bca8f73 100644
--- a/src/lib/nsas/nameserver_entry.cc
+++ b/src/lib/nsas/nameserver_entry.cc
@@ -1,4 +1,4 @@
-// Copyright (C) 2010  Internet Systems Consortium, Inc. ("ISC")
+// Copyright (C) 2010-2011  Internet Systems Consortium, Inc. ("ISC")
 //
 // Permission to use, copy, modify, and/or distribute this software for any
 // purpose with or without fee is hereby granted, provided that the above
@@ -223,7 +223,8 @@ class NameserverEntry::ResolverCallback :
          * \short We received the address successfully.
          *
          * This extracts the addresses out from the response and puts them
-         * inside the entry. It tries to reuse the address entries from before (if there were any), to keep their RTTs.
+         * inside the entry. It tries to reuse the address entries from before
+         * (if there were any), to keep their RTTs.
          */
         virtual void success(MessagePtr response_message) {
             time_t now = time(NULL);
@@ -231,10 +232,21 @@ class NameserverEntry::ResolverCallback :
             Lock lock(entry_->mutex_);
 
             // TODO: find the correct RRset, not simply the first
-            if (!response_message ||
-                response_message->getRcode() != isc::dns::Rcode::NOERROR() ||
+            if (!response_message) {
+                LOG_ERROR(nsas_logger, NSAS_NULL_RESPONSE).arg(entry_->getName());
+                failureInternal(lock);
+                return;
+
+            } else if (response_message->getRcode() != isc::dns::Rcode::NOERROR()) {
+                LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_ERROR_RESPONSE).
+                          arg(response_message->getRcode()).arg(entry_->getName());
+                failureInternal(lock);
+                return;
+
+            } else if (
                 response_message->getRRCount(isc::dns::Message::SECTION_ANSWER) == 0) {
-                LOG_ERROR(nsas_logger, NSAS_INVALID_RESPONSE).arg(entry_->getName());
+                LOG_DEBUG(nsas_logger, NSAS_DBG_RESULTS, NSAS_EMPTY_RESPONSE).
+                          arg(entry_->getName());
                 failureInternal(lock);
                 return;
             }
@@ -371,7 +383,7 @@ class NameserverEntry::ResolverCallback :
             }
         }
 
-        // Handle a failure to optain data. Dispatches callbacks and leaves
+        // Handle a failure to obtain data. Dispatches callbacks and leaves
         // lock unlocked
         void failureInternal(Lock &lock) {
             // Set state of the addresses
diff --git a/src/lib/nsas/nsas_messages.mes b/src/lib/nsas/nsas_messages.mes
index 512fcd5..6c35172 100644
--- a/src/lib/nsas/nsas_messages.mes
+++ b/src/lib/nsas/nsas_messages.mes
@@ -14,6 +14,16 @@
 
 $NAMESPACE isc::nsas
 
+% NSAS_EMPTY_RESPONSE response to query for %1 returned an empty answer section
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed.  The query completed successfully but the
+answer section in the response was empty.
+
+% NSAS_ERROR_RESPONSE error response of %1 returned in query for %2
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed.  The query completed successfully but the
+RCODE in the response was something other than NOERROR.
+
 % NSAS_FIND_NS_ADDRESS asking resolver to obtain A and AAAA records for %1
 A debug message issued when the NSAS (nameserver address store - part
 of the resolver) is making a callback into the resolver to retrieve the
@@ -24,17 +34,6 @@ A debug message issued when the NSAS (nameserver address store - part
 of the resolver) has retrieved the given address for the specified
 nameserver through an external query.
 
-% NSAS_INVALID_RESPONSE queried for %1 but got invalid response
-The NSAS (nameserver address store - part of the resolver) made a query
-for a RR for the specified nameserver but received an invalid response.
-Either the success function was called without a DNS message or the
-message was invalid on some way. (In the latter case, the error should
-have been picked up elsewhere in the processing logic, hence the raising
-of the error here.)
-
-This message indicates an internal error in the NSAS.  Please raise a
-bug report.
-
 % NSAS_LOOKUP_CANCEL lookup for zone %1 has been canceled
 A debug message issued when an NSAS (nameserver address store - part of
 the resolver) lookup for a zone has been canceled.
@@ -46,6 +45,14 @@ for the specified nameserver.  This is not necessarily a problem - the
 nameserver may be unreachable, in which case the NSAS will try other
 nameservers in the zone.
 
+% NSAS_NULL_RESPONSE got null message in success callback for query for %1
+The NSAS (nameserver address store - part of the resolver) made a query
+for information it needed.  The query completed successfully, but the
+message passed to the callback was null.
+
+This message indicates an internal error in the NSAS.  Please raise a
+bug report.
+
 % NSAS_SEARCH_ZONE_NS searching NSAS for nameservers for zone %1
 A debug message output when a call is made to the NSAS (nameserver 
 address store - part of the resolver) to obtain the nameservers for 
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index c0f1e32..aa5d0ab 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,5 @@
 SUBDIRS = . tests
 
-python_PYTHON = __init__.py sockcreator.py component.py special_component.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
+		socket_cache.py
 pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index 248bd3b..91b7064 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -39,6 +39,7 @@ START_CMD = 'start'
 STOP_CMD = 'stop'
 
 STARTED_OK_TIME = 10
+COMPONENT_RESTART_DELAY = 10
 
 STATE_DEAD = 'dead'
 STATE_STOPPED = 'stopped'
@@ -99,11 +100,18 @@ class BaseComponent:
             but it is vital part of the service (like auth server). If
             it fails to start or crashes in less than 10s after the first
             startup, the system is brought down. If it crashes later on,
-            it is restarted.
+            it is restarted (see below).
           * 'dispensable' means the component should be running, but if it
             doesn't start or crashes for some reason, the system simply tries
             to restart it and keeps running.
 
+        For components that are restarted, the restarts are not always
+        immediate; if the component has run for more than
+        COMPONENT_RESTART_DELAY (10) seconds, they are restarted right
+        away. If the component has not run that long, the system waits
+        until that time has passed (since the last start) before the
+        component is restarted.
+
         Note that the __init__ method of child class should have these
         parameters:
 
@@ -111,7 +119,7 @@ class BaseComponent:
 
         The extra parameters are:
         - `process` - which program should be started.
-        - `address` - the address on message buss, used to talk to the
+        - `address` - the address on message bus, used to talk to the
            component.
         - `params` - parameters to the program.
 
@@ -134,6 +142,7 @@ class BaseComponent:
         self.__state = STATE_STOPPED
         self._kind = kind
         self._boss = boss
+        self._original_start_time = None
 
     def start(self):
         """
@@ -149,6 +158,9 @@ class BaseComponent:
         logger.info(BIND10_COMPONENT_START, self.name())
         self.__state = STATE_RUNNING
         self.__start_time = time.time()
+        if self._original_start_time is None:
+            self._original_start_time = self.__start_time
+        self._restart_at = None
         try:
             self._start_internal()
         except Exception as e:
@@ -188,6 +200,11 @@ class BaseComponent:
         The exit code is used for logging. It might be None.
 
         It calls _failed_internal internally.
+
+        Returns True if the process was immediately restarted, returns
+                False is the process was not restarted, either because
+                it is considered a core or needed component, or because
+                the component is to be restarted later.
         """
         logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
                      exit_code if exit_code is not None else "unknown")
@@ -199,14 +216,47 @@ class BaseComponent:
         # (including it stopped really soon)
         if self._kind == 'core' or \
             (self._kind == 'needed' and time.time() - STARTED_OK_TIME <
-             self.__start_time):
+             self._original_start_time):
             self.__state = STATE_DEAD
             logger.fatal(BIND10_COMPONENT_UNSATISFIED, self.name())
             self._boss.component_shutdown(1)
+            return False
         # This means we want to restart
         else:
-            logger.warn(BIND10_COMPONENT_RESTART, self.name())
+            # If the component was only running for a short time, don't
+            # restart it right away; set the time at which it should be
+            # restarted and let restart() decide whether that time has
+            # already passed.
+            self.set_restart_time()
+            return self.restart()
+
+    def set_restart_time(self):
+        """Calculates and sets the time this component should be restarted.
+           Currently, it uses a very basic algorithm: start time +
+           COMPONENT_RESTART_DELAY (10 seconds). This may be improved upon
+           in the future.
+        """
+        self._restart_at = self.__start_time + COMPONENT_RESTART_DELAY
+
+    def get_restart_time(self):
+        """Returns the time at which this component should be restarted."""
+        return self._restart_at
+
+    def restart(self, now = None):
+        """Restarts the component if it has a restart_time and if the value
+           of the restart_time is smaller than 'now'.
+
+           If the parameter 'now' is given, its value will be used instead
+           of calling time.time().
+
+           Returns True if the component is restarted, False if not."""
+        if now is None:
+            now = time.time()
+        if self.get_restart_time() is not None and\
+           self.get_restart_time() < now:
             self.start()
+            return True
+        else:
+            return False
 
     def running(self):
         """
@@ -283,7 +333,7 @@ class BaseComponent:
         """
         pass
 
-    def kill(self, forcefull=False):
+    def kill(self, forceful=False):
         """
         Kills the component.
 
@@ -403,7 +453,7 @@ class Configurator:
     * `special` - Some components are started in a special way. If it is
       present, it specifies which class from the specials parameter should
       be used to create the component. In that case, some of the following
-      items might be irrelevant, depending on the special component choosen.
+      items might be irrelevant, depending on the special component chosen.
       If it is not there, the basic Component class is used.
     * `process` - Name of the executable to start. If it is not present,
       it defaults to the identifier of the component.
@@ -460,7 +510,7 @@ class Configurator:
 
         It is not expected that anyone would want to shutdown and then start
         the configurator again, so we don't explicitly make sure that would
-        work. However, we are not avare of anything that would make it not
+        work. However, we are not aware of anything that would make it not
         work either.
         """
         if not self._running:
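
The delayed-restart rule described in the docstrings above can be summarised
with a small, self-contained Python sketch (illustration only; it re-creates
the scheduling rule rather than using BaseComponent itself):

    import time

    COMPONENT_RESTART_DELAY = 10   # same value as in component.py

    start_time = time.time()       # when the component was last started
    # set_restart_time(): a crashed component may only be restarted this late
    restart_at = start_time + COMPONENT_RESTART_DELAY

    def may_restart(now=None):
        """Mirrors BaseComponent.restart(): only allow a restart once the
        scheduled time has passed."""
        if now is None:
            now = time.time()
        return restart_at is not None and restart_at < now

    print(may_restart())                    # False right after a quick crash
    print(may_restart(start_time + 11))     # True once the delay has elapsed
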
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
new file mode 100644
index 0000000..26e87d2
--- /dev/null
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Here's the cache for sockets from socket creator.
+"""
+
+import os
+import random
+import isc.bind10.sockcreator
+from copy import copy
+
+class SocketError(Exception):
+    """
+    Exception raised when the socket creator is unable to create the
+    requested socket. Possible reasons are that the address it should be
+    bound to is already taken, that the permissions are insufficient, that
+    the address family is not supported on this computer, and so on.
+
+    The errno, if not None, is passed from the socket creator.
+    """
+    def __init__(self, message, errno):
+        Exception.__init__(self, message)
+        self.errno = errno
+
+class ShareError(Exception):
+    """
+    The requested socket is already taken by another component and the sharing
+    parameters don't allow sharing with the new request.
+    """
+    pass
+
+class Socket:
+    """
+    This represents one socket cached by the socket cache. It should never
+    be used directly by a user; it is used internally by the Cache. Therefore
+    many member variables are accessed directly instead of through accessor
+    methods.
+
+    Be warned that this object implements the __del__ method. It closes the
+    socket held inside it. But this poses various problems with the garbage
+    collector. In short, do not make reference cycles with this class and
+    generally leave it alone to live peacefully.
+    """
+    def __init__(self, protocol, address, port, fileno):
+        """
+        Creates the socket.
+
+        The protocol, address and port are preserved for informational purposes.
+        """
+        self.protocol = protocol
+        self.address = address
+        self.port = port
+        self.fileno = fileno
+        # Mapping from token -> application
+        self.active_tokens = {}
+        # The tokens which were not yet picked up
+        self.waiting_tokens = set()
+        # Share modes and names by the tokens (token -> (mode, name))
+        self.shares = {}
+
+    def __del__(self):
+        """
+        Closes the file descriptor.
+        """
+        os.close(self.fileno)
+
+    def share_compatible(self, mode, name):
+        """
+        Checks if the given share mode and name is compatible with the ones
+        already installed here.
+
+        The allowed values for mode are listed in the Cache.get_token
+        function.
+        """
+        if mode not in ['NO', 'SAMEAPP', 'ANY']:
+            raise ValueError("Mode " + mode + " is invalid")
+
+        # Go through the existing ones
+        for (emode, ename) in self.shares.values():
+            if emode == 'NO' or mode == 'NO':
+                # One of them can't live together with anything
+                return False
+            if (emode == 'SAMEAPP' or mode == 'SAMEAPP') and \
+                ename != name:
+                # One of them can't live together with someone of different
+                # name
+                return False
+            # else both are ANY or SAMEAPP with the same name, which is OK
+        # No problem found, so we consider it OK
+        return True
+
+class Cache:
+    """
+    This is the cache for sockets from the socket creator. The purpose of the
+    cache is to hold the sockets that were requested until they are no longer
+    needed. One reason is that the socket is created before it is sent over
+    the unix domain socket in the boss, so we need to keep it somewhere for a
+    while.
+
+    The other reason is that a single socket might be requested multiple
+    times, so we keep it here in case someone else asks for it.
+
+    Each socket kept here has a reference count and when it drops to zero,
+    it is removed from the cache and closed.
+
+    This is expected to be part of the Boss; it is not a general utility
+    class.
+
+    It is not expected to be subclassed. The methods and members are named
+    as protected so that tests can access them more easily.
+    """
+    def __init__(self, creator):
+        """
+        Initialization. The creator is the socket creator object
+        (isc.bind10.sockcreator.Creator) which will be used to create yet
+        uncached sockets.
+        """
+        self._creator = creator
+        # The sockets we hold live here; these dicts provide various ways
+        # to look them up. Each of them contains the Socket objects in some form.
+
+        # This one is dict of token: socket for the ones that were not yet
+        # picked up by an application.
+        self._waiting_tokens = {}
+        # This format is the same as above, but for the tokens that were
+        # already picked up by the application and not yet released.
+        self._active_tokens = {}
+        # This is a dict from applications to set of tokens used by the
+        # application, for the sockets already picked up by an application
+        self._active_apps = {}
+        # The sockets live here to be indexed by protocol, address and
+        # subsequently by port
+        self._sockets = {}
+        # These are just the tokens actually in use, so we don't generate
+        # dupes. If one is dropped, it can be potentially reclaimed.
+        self._live_tokens = set()
+
+    def get_token(self, protocol, address, port, share_mode, share_name):
+        """
+        This requests a token representing a socket. The socket is either
+        found in the cache already or requested from the creator at this time
+        (and cached for later time).
+
+        The parameters are:
+        - protocol: either 'UDP' or 'TCP'
+        - address: the IPAddr object representing the address to bind to
+        - port: integer saying which port to bind to
+        - share_mode: either 'NO', 'SAMEAPP' or 'ANY', specifying how the
+          socket can be shared with others. See bin/bind10/creatorapi.txt
+          for details.
+        - share_name: the name of application, in case of 'SAMEAPP' share
+          mode. Only requests with the same name can share the socket.
+
+        If the call is successful, it returns a string token which can be
+        used to pick up the socket later. The socket is created with reference
+        count zero, and if it isn't picked up soon enough (the exact timeout
+        has yet to be decided), it will be removed and the token becomes
+        invalid.
+
+        It can fail in various ways. Explicitly listed exceptions are:
+        - SocketError: this one is thrown if the socket creator couldn't provide
+          the socket and it is not yet cached (it belongs to another application,
+          for example).
+        - ShareError: the socket is already in the cache, but it can't be
+          shared due to the share_mode and share_name combination (the
+          restrictions of both the request and of all copies of the socket
+          already handed out are considered, so it can be raised even if you
+          call it with share_mode 'ANY').
+        - isc.bind10.sockcreator.CreatorError: fatal creator errors are
+          propagated. They should cause the boss to exit if ever encountered.
+
+        Note that the tokens are not guaranteed to be unique and they
+        should be used as opaque handles only.
+        """
+        addr_str = str(address)
+        try:
+            socket = self._sockets[protocol][addr_str][port]
+        except KeyError:
+            # Something in the dicts is not there, so socket is to be
+            # created
+            try:
+                fileno = self._creator.get_socket(address, port, protocol)
+            except isc.bind10.sockcreator.CreatorError as ce:
+                if ce.fatal:
+                    raise
+                else:
+                    raise SocketError(str(ce), ce.errno)
+            socket = Socket(protocol, address, port, fileno)
+            # And cache it
+            if protocol not in self._sockets:
+                self._sockets[protocol] = {}
+            if addr_str not in self._sockets[protocol]:
+                self._sockets[protocol][addr_str] = {}
+            self._sockets[protocol][addr_str][port] = socket
+        # Now we get the token, check it is compatible
+        if not socket.share_compatible(share_mode, share_name):
+            raise ShareError("Cached socket not compatible with mode " +
+                             share_mode + " and name " + share_name)
+        # Grab yet unused token
+        token = 't' + str(random.randint(0, 2**32 - 1))
+        while token in self._live_tokens:
+            token = 't' + str(random.randint(0, 2**32 - 1))
+        self._waiting_tokens[token] = socket
+        self._live_tokens.add(token)
+        socket.shares[token] = (share_mode, share_name)
+        socket.waiting_tokens.add(token)
+        return token
+
+    def get_socket(self, token, application):
+        """
+        This returns the socket created by get_token. The token should be the
+        one returned from previous call from get_token. The token can be used
+        only once to receive the socket.
+
+        The application is a token representing the application that requested
+        it. Currently, boss uses the file descriptor of connection from the
+        application, but anything which can be a key in a dict is OK from the
+        cache's point of view. You just need to use the same thing in
+        drop_application.
+
+        In case the token is considered invalid (it doesn't come from
+        get_token, it was already used, the socket wasn't picked up soon
+        enough, ...), it raises ValueError.
+        """
+        try:
+            socket = self._waiting_tokens[token]
+        except KeyError:
+            raise ValueError("Token " + token +
+                             " isn't waiting to be picked up")
+        del self._waiting_tokens[token]
+        self._active_tokens[token] = socket
+        if application not in self._active_apps:
+            self._active_apps[application] = set()
+        self._active_apps[application].add(token)
+        socket.waiting_tokens.remove(token)
+        socket.active_tokens[token] = application
+        return socket.fileno
+
+    def drop_socket(self, token):
+        """
+        This signals the application no longer uses the socket which was
+        requested by the given token. It decreases the reference count for
+        the socket and closes and removes the cached copy if it was the last
+        one.
+
+        It raises ValueError if the token doesn't exist.
+        """
+        try:
+            socket = self._active_tokens[token]
+        except KeyError:
+            raise ValueError("Token " + token + " doesn't represent an " +
+                             "active socket")
+        # Now, remove everything from the bookkeeping
+        del socket.shares[token]
+        app = socket.active_tokens[token]
+        del socket.active_tokens[token]
+        del self._active_tokens[token]
+        self._active_apps[app].remove(token)
+        if len(self._active_apps[app]) == 0:
+            del self._active_apps[app]
+        self._live_tokens.remove(token)
+        # The socket is not used by anything now, so remove it
+        if len(socket.active_tokens) == 0 and len(socket.waiting_tokens) == 0:
+            addr = str(socket.address)
+            port = socket.port
+            proto = socket.protocol
+            del self._sockets[proto][addr][port]
+            # Clean up empty branches of the structure
+            if len(self._sockets[proto][addr]) == 0:
+                del self._sockets[proto][addr]
+            if len(self._sockets[proto]) == 0:
+                del self._sockets[proto]
+
+    def drop_application(self, application):
+        """
+        This signals that the application has terminated and that all sockets
+        it picked up should now be considered unused. It effectively calls
+        drop_socket on each of the sockets the application picked up and
+        didn't drop yet.
+
+        If the application is invalid (no get_socket was successful with this
+        value of application), it raises ValueError.
+        """
+        try:
+            # Get a copy. Iterating over a set while deleting from it is
+            # not well defined, so we iterate over our own copy instead.
+            to_drop = copy(self._active_apps[application])
+        except KeyError:
+            raise ValueError("Application " + str(application) +
+                             " doesn't hold any sockets")
+        for token in to_drop:
+            self.drop_socket(token)
+        # We don't call del now. The last drop_socket should have
+        # removed the application key as well.
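
To make the token lifecycle concrete, here is a small usage sketch
(illustration only; FakeCreator and the 'app-1' application key are invented
for this example, a real deployment passes an isc.bind10.sockcreator.Creator
instead):

    import os
    import isc.bind10.socket_cache
    from isc.net.addr import IPAddr

    class FakeCreator:
        """Stands in for isc.bind10.sockcreator.Creator in this sketch; it
        hands out a real (if uninteresting) file descriptor so that
        Socket.__del__ has something valid to close."""
        def get_socket(self, address, port, protocol):
            return os.open('/dev/null', os.O_RDONLY)

    cache = isc.bind10.socket_cache.Cache(FakeCreator())
    addr = IPAddr('192.0.2.1')

    # Request a socket; only an opaque token is returned at this point.
    token = cache.get_token('UDP', addr, 53, 'SAMEAPP', 'example-app')
    # Pick the socket up on behalf of an application (any dict key works).
    fd = cache.get_socket(token, 'app-1')
    # A compatible share request for the same address reuses the cached socket.
    token2 = cache.get_token('UDP', addr, 53, 'ANY', 'example-app')
    # Release the picked-up socket; it stays cached while token2 is waiting.
    cache.drop_socket(token)
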
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index 1c8bc64..c9c7683 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -42,6 +42,7 @@ class SockCreator(BaseComponent):
         self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
                                                         os.environ['PATH'])
         self._boss.register_process(self.pid(), self)
+        self._boss.set_creator(self.__creator)
         self._boss.log_started(self.pid())
 
     def _stop_internal(self):
@@ -57,8 +58,8 @@ class SockCreator(BaseComponent):
         """
         return self.__creator.pid() if self.__creator else None
 
-    def kill(self, forcefull=False):
-        # We don't really care about forcefull here
+    def kill(self, forceful=False):
+        # We don't really care about forceful here
         if self.__creator:
             self.__creator.kill()
 
@@ -108,16 +109,6 @@ class CmdCtl(Component):
         Component.__init__(self, process, boss, kind, 'Cmdctl', None,
                            boss.start_cmdctl)
 
-class XfrIn(Component):
-    def __init__(self, process, boss, kind, address=None, params=None):
-        Component.__init__(self, process, boss, kind, 'Xfrin', None,
-                           boss.start_xfrin)
-
-class XfrOut(Component):
-    def __init__(self, process, boss, kind, address=None, params=None):
-        Component.__init__(self, process, boss, kind, 'Xfrout', None,
-                           boss.start_xfrout)
-
 class SetUID(BaseComponent):
     """
     This is a pseudo-component which drops root privileges when started
@@ -135,7 +126,7 @@ class SetUID(BaseComponent):
             posix.setuid(self.uid)
 
     def _stop_internal(self): pass
-    def kill(self, forcefull=False): pass
+    def kill(self, forceful=False): pass
 
     def name(self):
         return "Set UID"
@@ -157,9 +148,6 @@ def get_specials():
         'auth': Auth,
         'resolver': Resolver,
         'cmdctl': CmdCtl,
-        # FIXME: Temporary workaround before #1292 is done
-        'xfrin': XfrIn,
-        'xfrout': XfrOut,
         # TODO: Remove when not needed, workaround before sockcreator works
         'setuid': SetUID
     }
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df625b2..658db1e 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 #PYTESTS = args_test.py bind10_test.py
 # NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py component_test.py
+PYTESTS = sockcreator_test.py component_test.py socket_cache_test.py
 
 EXTRA_DIST = $(PYTESTS)
 
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 15fa470..3b49b18 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -86,9 +86,6 @@ class BossUtils:
     def start_cmdctl(self):
         pass
 
-    def start_xfrin(self):
-        pass
-
 class ComponentTests(BossUtils, unittest.TestCase):
     """
     Tests for the bind10.component.Component class
@@ -221,11 +218,6 @@ class ComponentTests(BossUtils, unittest.TestCase):
         """
         Check the component restarted successfully.
 
-        Currently, it is implemented as starting it again right away. This will
-        change, it will register itself into the restart schedule in boss. But
-        as the integration with boss is not clear yet, we don't know how
-        exactly that will happen.
-
         Reset the self.__start_called to False before calling the function when
         the component should fail.
         """
@@ -237,6 +229,16 @@ class ComponentTests(BossUtils, unittest.TestCase):
         # Check it can't be started again
         self.assertRaises(ValueError, component.start)
 
+    def __check_not_restarted(self, component):
+        """
+        Check the component has not (yet) restarted successfully.
+        """
+        self.assertFalse(self._shutdown)
+        self.assertTrue(self.__start_called)
+        self.assertFalse(self.__stop_called)
+        self.assertTrue(self.__failed_called)
+        self.assertFalse(component.running())
+
     def __do_start_stop(self, kind):
         """
         This is a body of a test. It creates a component of given kind,
@@ -296,7 +298,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         component.start()
         self.__check_started(component)
         # Pretend the component died
-        component.failed(1)
+        restarted = component.failed(1)
+        # Since it is a core component, it should not be restarted
+        self.assertFalse(restarted)
         # It should bring down the whole server
         self.__check_dead(component)
 
@@ -312,7 +316,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         self.__check_started(component)
         self._timeskip()
         # Pretend the component died some time later
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should not be restarted
+        self.assertFalse(restarted)
         # Check the component is still dead
         self.__check_dead(component)
 
@@ -328,7 +334,9 @@ class ComponentTests(BossUtils, unittest.TestCase):
         component.start()
         self.__check_started(component)
         # Make it fail right away.
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should not have restarted
+        self.assertFalse(restarted)
         self.__check_dead(component)
 
     def test_start_fail_needed_later(self):
@@ -344,37 +352,65 @@ class ComponentTests(BossUtils, unittest.TestCase):
         # Make it fail later on
         self.__start_called = False
         self._timeskip()
-        component.failed(1)
+        restarted = component.failed(1)
+        # Should have restarted
+        self.assertTrue(restarted)
         self.__check_restarted(component)
 
     def test_start_fail_dispensable(self):
         """
-        Start and then fail a dispensable component. Should just get restarted.
+        Start and then fail a dispensable component. Should not get restarted.
         """
         # Just ordinary startup
-        component = self.__create_component('needed')
+        component = self.__create_component('dispensable')
         self.__check_startup(component)
         component.start()
         self.__check_started(component)
         # Make it fail right away
-        self.__start_called = False
-        component.failed(1)
-        self.__check_restarted(component)
+        restarted = component.failed(1)
+        # Should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
 
-    def test_start_fail_dispensable(self):
+    def test_start_fail_dispensable_later(self):
         """
         Start and then later on fail a dispensable component. Should just get
         restarted.
         """
         # Just ordinary startup
-        component = self.__create_component('needed')
+        component = self.__create_component('dispensable')
         self.__check_startup(component)
         component.start()
         self.__check_started(component)
         # Make it fail later on
-        self.__start_called = False
         self._timeskip()
-        component.failed(1)
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+    def test_start_fail_dispensable_restart_later(self):
+        """
+        Start and then fail a dispensable component, wait a bit and try to
+        restart. Should get restarted after the wait.
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        component.start()
+        self.__check_started(component)
+        # Make it fail immediately
+        restarted = component.failed(1)
+        # should signal that it did not restart
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # try to restart again
+        restarted = component.restart()
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
         self.__check_restarted(component)
 
     def test_fail_core(self):
@@ -402,14 +438,56 @@ class ComponentTests(BossUtils, unittest.TestCase):
     def test_fail_dispensable(self):
         """
         Failure to start a dispensable component. The exception should get
-        through, but it should be restarted.
+        through, but it should be restarted after a time skip.
         """
         component = self.__create_component('dispensable')
         self.__check_startup(component)
         component._start_internal = self.__fail_to_start
         self.assertRaises(TestError, component.start)
+        # tell it to see if it must restart
+        restarted = component.restart()
+        # should not have restarted yet
+        self.assertFalse(restarted)
+        self.__check_not_restarted(component)
+        self._timeskip()
+        # tell it to see if it must restart and do so, with our vision of time
+        restarted = component.restart()
+        # should have restarted now
+        self.assertTrue(restarted)
         self.__check_restarted(component)
 
+    def test_component_start_time(self):
+        """
+        Check that original start time is set initially, and remains the same
+        after a restart, while the internal __start_time does change
+        """
+        # Just ordinary startup
+        component = self.__create_component('dispensable')
+        self.__check_startup(component)
+        self.assertIsNone(component._original_start_time)
+        component.start()
+        self.__check_started(component)
+
+        self.assertIsNotNone(component._original_start_time)
+        self.assertIsNotNone(component._BaseComponent__start_time)
+        original_start_time = component._original_start_time
+        start_time = component._BaseComponent__start_time
+        # Not restarted yet, so they should be the same
+        self.assertEqual(original_start_time, start_time)
+
+        self._timeskip()
+        # Make it fail
+        restarted = component.failed(1)
+        # should signal that it restarted
+        self.assertTrue(restarted)
+        # and check if it really did
+        self.__check_restarted(component)
+
+        # original start time should not have changed
+        self.assertEqual(original_start_time, component._original_start_time)
+        # but actual start time should
+        self.assertNotEqual(start_time, component._BaseComponent__start_time)
+
     def test_bad_kind(self):
         """
         Test the component rejects nonsensical kinds. This includes bad
@@ -430,7 +508,6 @@ class ComponentTests(BossUtils, unittest.TestCase):
                                isc.bind10.special_component.Auth,
                                isc.bind10.special_component.Resolver,
                                isc.bind10.special_component.CmdCtl,
-                               isc.bind10.special_component.XfrIn,
                                isc.bind10.special_component.SetUID]:
             component = component_type('none', self, 'needed')
             self.assertIsNone(component.pid())
@@ -592,7 +669,7 @@ class TestComponent(BaseComponent):
     def _failed_internal(self):
         self.log('failed')
 
-    def kill(self, forcefull=False):
+    def kill(self, forceful=False):
         self.log('killed')
 
 class FailComponent(BaseComponent):
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
index 4453184..d97d21b 100644
--- a/src/lib/python/isc/bind10/tests/sockcreator_test.py
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -13,9 +13,6 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
 """
 Tests for the bind10.sockcreator module.
 """
diff --git a/src/lib/python/isc/bind10/tests/socket_cache_test.py b/src/lib/python/isc/bind10/tests/socket_cache_test.py
new file mode 100644
index 0000000..bbbf776
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/socket_cache_test.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import isc.log
+import isc.bind10.socket_cache
+import isc.bind10.sockcreator
+from isc.net.addr import IPAddr
+import os
+
+class Test(unittest.TestCase):
+    """
+    Base for the tests here. It replaces the os.close method.
+    """
+    def setUp(self):
+        self._closes = []
+        isc.bind10.socket_cache.os.close = self.__close
+
+    def tearDown(self):
+        # This is not a very clean solution, but once the test ceases
+        # to exist, the replaced method must no longer be used to
+        # destroy the objects. And we can't restore the real os.close
+        # here, as we never work with real sockets in these tests.
+        isc.bind10.socket_cache.os.close = lambda fd: None
+
+    def __close(self, fd):
+        """
+        Just log a close was called.
+        """
+        self._closes.append(fd)
+
+class SocketTest(Test):
+    """
+    Test for the Socket class.
+    """
+    def setUp(self):
+        """
+        Creates the socket to be tested.
+
+        It also creates other useful test variables.
+        """
+        Test.setUp(self)
+        self.__address = IPAddr("192.0.2.1")
+        self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+                                                       1024, 42)
+
+    def test_init(self):
+        """
+        Checks the internals of the socket just after creation.
+        """
+        self.assertEqual('UDP', self.__socket.protocol)
+        self.assertEqual(self.__address, self.__socket.address)
+        self.assertEqual(1024, self.__socket.port)
+        self.assertEqual(42, self.__socket.fileno)
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual({}, self.__socket.shares)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+
+    def test_del(self):
+        """
+        Check it closes the socket when removed.
+        """
+        # This should make the refcount 0 and call the destructor
+        # right away
+        self.__socket = None
+        self.assertEqual([42], self._closes)
+
+    def test_share_modes(self):
+        """
+        Test the share mode compatibility check function.
+        """
+        modes = ['NO', 'SAMEAPP', 'ANY']
+        # If there are no shares, it is compatible with everything.
+        for mode in modes:
+            self.assertTrue(self.__socket.share_compatible(mode, 'anything'))
+
+        # There's a 'NO' share already, so it is incompatible with everything.
+        self.__socket.shares = {'token': ('NO', 'anything')}
+        for mode in modes:
+            self.assertFalse(self.__socket.share_compatible(mode, 'anything'))
+
+        # If there's a SAMEAPP share, only ANY and SAMEAPP requests from the
+        # same application are compatible.
+        self.__socket.shares = {'token': ('SAMEAPP', 'app')}
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+        self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+
+        # If there's ANY, then ANY and SAMEAPP with the same name are compatible
+        self.__socket.shares = {'token': ('ANY', 'app')}
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'something'))
+
+        # In case there are already multiple shares inside
+        self.__socket.shares = {
+            'token': ('ANY', 'app'),
+            'another': ('SAMEAPP', 'app')
+        }
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+
+        # Invalid inputs are rejected
+        self.assertRaises(ValueError, self.__socket.share_compatible, 'bad',
+                          'bad')
+
+class SocketCacheTest(Test):
+    """
+    Some tests for the isc.bind10.socket_cache.Cache.
+
+    This class, as well as being the testcase, pretends to be the
+    socket creator so it can hijack all the requests for sockets.
+    """
+    def setUp(self):
+        """
+        Creates the cache for tests with us being the socket creator.
+
+        Also creates some more variables for testing.
+        """
+        Test.setUp(self)
+        self.__cache = isc.bind10.socket_cache.Cache(self)
+        self.__address = IPAddr("192.0.2.1")
+        self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+                                                       1024, 42)
+        self.__get_socket_called = False
+
+    def test_init(self):
+        """
+        Checks the internals of the cache just after the creation.
+        """
+        self.assertEqual(self, self.__cache._creator)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(), self.__cache._live_tokens)
+
+    def get_socket(self, address, port, socktype):
+        """
+        Pretend to be a socket creator.
+
+        This expects to be called with the test address, port 1024 and 'UDP'.
+
+        Returns 42 and notes that it was called.
+        """
+        self.assertEqual(self.__address, address)
+        self.assertEqual(1024, port)
+        self.assertEqual('UDP', socktype)
+        self.__get_socket_called = True
+        return 42
+
+    def test_get_token_cached(self):
+        """
+        Check the behaviour of get_token when the requested socket is already
+        cached inside.
+        """
+        self.__cache._sockets = {
+            'UDP': {'192.0.2.1': {1024: self.__socket}}
+        }
+        token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+                                       'test')
+        # It didn't call get_socket
+        self.assertFalse(self.__get_socket_called)
+        # It returned something
+        self.assertIsNotNone(token)
+        # The token is both in the waiting sockets and the live tokens
+        self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        # The token got the new share to block any relevant queries
+        self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+        # The socket knows the token is waiting in it
+        self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+        # If we request one more, with incompatible share, it is rejected
+        self.assertRaises(isc.bind10.socket_cache.ShareError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+        # The internals are not changed, so the same checks
+        self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+        self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+    def test_get_token_uncached(self):
+        """
+        Check a new socket is created when a corresponding one is missing.
+        """
+        token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+                                       'test')
+        # The get_socket was called
+        self.assertTrue(self.__get_socket_called)
+        # It returned something
+        self.assertIsNotNone(token)
+        # Get the socket and check it looks OK
+        socket = self.__cache._waiting_tokens[token]
+        self.assertEqual(self.__address, socket.address)
+        self.assertEqual(1024, socket.port)
+        self.assertEqual(42, socket.fileno)
+        self.assertEqual('UDP', socket.protocol)
+        # The socket is properly cached
+        self.assertEqual({
+            'UDP': {'192.0.2.1': {1024: socket}}
+        }, self.__cache._sockets)
+        # The token is both in the waiting sockets and the live tokens
+        self.assertEqual({token: socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        # The token got the new share to block any relevant queries
+        self.assertEqual({token: ('ANY', 'test')}, socket.shares)
+        # The socket knows the token is waiting in it
+        self.assertEqual(set([token]), socket.waiting_tokens)
+
+    def test_get_token_excs(self):
+        """
+        Test that it is handled properly if the socket creator raises
+        some exceptions.
+        """
+        def raiseCreatorError(fatal):
+            raise isc.bind10.sockcreator.CreatorError('test error', fatal)
+        # First, fatal socket creator errors are passed through
+        self.get_socket = lambda addr, port, proto: raiseCreatorError(True)
+        self.assertRaises(isc.bind10.sockcreator.CreatorError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+        # And nonfatal are converted to SocketError
+        self.get_socket = lambda addr, port, proto: raiseCreatorError(False)
+        self.assertRaises(isc.bind10.socket_cache.SocketError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+
+    def test_get_socket(self):
+        """
+        Test that we can pick up a socket if we know a token.
+        """
+        token = "token"
+        app = 13
+        # No socket prepared there
+        self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+        # Not changed
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(), self.__cache._live_tokens)
+        # Prepare a token there
+        self.__socket.waiting_tokens = set([token])
+        self.__socket.shares = {token: ('ANY', 'app')}
+        self.__cache._waiting_tokens = {token: self.__socket}
+        self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+        self.__cache._live_tokens = set([token])
+        socket = self.__cache.get_socket(token, app)
+        # Received the fileno
+        self.assertEqual(42, socket)
+        # It moved from waiting to active ones
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({token: self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({13: set([token])}, self.__cache._active_apps)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({token: 13}, self.__socket.active_tokens)
+        # Trying to get it again fails
+        self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+
+    def test_drop_application(self):
+        """
+        Test that a drop_application calls drop_socket on all the sockets
+        held by the application.
+        """
+        sockets = set()
+        def drop_socket(token):
+            sockets.add(token)
+        # Mock the drop_socket so we know it is called
+        self.__cache.drop_socket = drop_socket
+        self.assertRaises(ValueError, self.__cache.drop_application,
+                          13)
+        self.assertEqual(set(), sockets)
+        # Put the tokens into active_apps. Nothing else should be touched
+        # by this call, so leave it alone.
+        self.__cache._active_apps = {
+            1: set(['t1', 't2']),
+            2: set(['t3'])
+        }
+        self.__cache.drop_application(1)
+        # We don't check the _active_apps, as it would be cleaned by
+        # drop_socket and we removed it.
+        self.assertEqual(set(['t1', 't2']), sockets)
+
+    def test_drop_socket(self):
+        """
+        Test the drop_socket call. It tests:
+        * That a socket which still has something keeping it alive
+          (a waiting or active token) is left alive.
+        * That otherwise it is deleted and closed.
+        * That all the surrounding bookkeeping data are removed properly.
+        * The exceptions raised for invalid tokens.
+        """
+        self.assertRaises(ValueError, self.__cache.drop_socket, "bad token")
+        self.__socket.active_tokens = {'t1': 1}
+        self.__socket.waiting_tokens = set(['t2'])
+        self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+        self.__cache._waiting_tokens = {'t2': self.__socket}
+        self.__cache._active_tokens = {'t1': self.__socket}
+        self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+        self.__cache._live_tokens = set(['t1', 't2'])
+        self.__cache._active_apps = {1: set(['t1'])}
+        # We can't drop what wasn't picked up yet
+        self.assertRaises(ValueError, self.__cache.drop_socket, 't2')
+        self.assertEqual({'t1': 1}, self.__socket.active_tokens)
+        self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+        self.assertEqual({'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')},
+                         self.__socket.shares)
+        self.assertEqual({'t2': self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual({'t1': self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t1', 't2']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t1'])}, self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # If we drop this, it survives because it still waits to be picked up
+        self.__cache.drop_socket('t1')
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+        self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t2']), self.__cache._live_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # Fill it again, now with two applications holding the same socket
+        self.__socket.active_tokens = {'t1': 1, 't2': 2}
+        self.__socket.waiting_tokens = set()
+        self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+        self.__cache._waiting_tokens = {}
+        self.__cache._active_tokens = {
+            't1': self.__socket,
+            't2': self.__socket
+        }
+        self.__cache._live_tokens = set(['t1', 't2', 't3'])
+        self.assertEqual([], self._closes)
+        # We cheat here a little bit: the t3 doesn't exist anywhere else, but
+        # we need to check the app isn't removed too soon, and it shouldn't
+        # matter anywhere else, so we just avoid the tiresome filling in.
+        self.__cache._active_apps = {1: set(['t1', 't3']), 2: set(['t2'])}
+        # Drop it as t1. It should still live.
+        self.__cache.drop_socket('t1')
+        self.assertEqual({'t2': 2}, self.__socket.active_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({'t2': self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t3', 't2']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t3']), 2: set(['t2'])},
+                         self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # Drop it again, from the other application. It should get removed
+        # and closed.
+        self.__cache.drop_socket('t2')
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(['t3']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t3'])}, self.__cache._active_apps)
+        # The cache doesn't hold the socket. So when we remove it ourselves,
+        # it should get closed.
+        self.__socket = None
+        self.assertEqual([42], self._closes)
+
+if __name__ == '__main__':
+    isc.log.init("bind10")
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()
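
(For reference, a minimal sketch of the calling sequence the cache is built
for, pieced together from the tests above.  FakeCreator, the share name and
the application id are illustrative stand-ins; the real creator is whatever
object provides a get_socket(address, port, socktype) method.)

    import isc.bind10.socket_cache
    from isc.net.addr import IPAddr

    class FakeCreator:
        # Stand-in for the real socket creator: anything with a
        # get_socket(address, port, socktype) method returning a fileno.
        def get_socket(self, address, port, socktype):
            return 42

    cache = isc.bind10.socket_cache.Cache(FakeCreator())
    addr = IPAddr("192.0.2.1")

    # Reserve the socket under a share mode ('NO'/'SAMEAPP'/'ANY') and a
    # share name; incompatible requests raise ShareError, and non-fatal
    # creator failures are reported as SocketError.
    token = cache.get_token('UDP', addr, 1024, 'ANY', 'example-app')
    # Pick up the file descriptor on behalf of application id 1 ...
    fd = cache.get_socket(token, 1)
    # ... and release it again; the fd is closed once no token holds it.
    cache.drop_socket(token)
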
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index a5b4ca3..47f3dbc 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -17,12 +17,14 @@ datasrc_la_SOURCES += client_python.cc client_python.h
 datasrc_la_SOURCES += iterator_python.cc iterator_python.h
 datasrc_la_SOURCES += finder_python.cc finder_python.h
 datasrc_la_SOURCES += updater_python.cc updater_python.h
+datasrc_la_SOURCES += journal_reader_python.cc journal_reader_python.h
 
 datasrc_la_CPPFLAGS = $(AM_CPPFLAGS) $(PYTHON_INCLUDES)
 datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
 datasrc_la_LDFLAGS += -module
 datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
 datasrc_la_LIBADD += $(PYTHON_LIB)
 
@@ -30,6 +32,7 @@ EXTRA_DIST = client_inc.cc
 EXTRA_DIST += finder_inc.cc
 EXTRA_DIST += iterator_inc.cc
 EXTRA_DIST += updater_inc.cc
+EXTRA_DIST += journal_reader_inc.cc
 
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index f4fb01b..e0c0f06 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -190,4 +190,60 @@ Parameters:\n\
   journaling The zone updater should store a journal of the changes.\n\
 \n\
 ";
+
+// Modifications from C++ doc:
+//   pointer -> (removed)
+//   Null -> None
+//   exception types
+const char* const DataSourceClient_getJournalReader_doc = "\
+get_journal_reader(zone, begin_serial, end_serial) ->\n\
+   (int, ZoneJournalReader)\n\
+\n\
+Return a journal reader to retrieve differences of a zone.\n\
+\n\
+A derived version of this method creates a concrete ZoneJournalReader\n\
+object specific to the underlying data source for the specified name\n\
+of zone and differences between the versions specified by the\n\
+beginning and ending serials of the corresponding SOA RRs. The RR\n\
+class of the zone is the one that the client is expected to handle\n\
+(see the detailed description of this class).\n\
+\n\
+Note that the SOA serials are compared by the semantics of the serial\n\
+number arithmetic. So, for example, begin_serial can be larger than\n\
+end_serial as bare unsigned integers. The underlying data source\n\
+implementation is assumed to keep track of sufficient history to\n\
+identify (if it exists) the corresponding difference between the specified\n\
+versions.\n\
+\n\
+This method returns the result as a pair of a result code and a\n\
+ZoneJournalReader object. On success, the result code is\n\
+SUCCESS and the object must not be None; otherwise the result code is\n\
+something other than SUCCESS and the object must be None.\n\
+\n\
+If the specified zone is not found in the data source, the result code\n\
+is NO_SUCH_ZONE. Otherwise, if specified range of difference for the\n\
+zone is not found in the data source, the result code is\n\
+NO_SUCH_VERSION.\n\
+\n\
+Handling differences is an optional feature of a data source. If the\n\
+underlying data source does not support difference handling, this\n\
+method throws an exception of class isc.datasrc.NotImplemented for\n\
+that data source type.\n\
+\n\
+Exceptions:\n\
+  isc.datasrc.NotImplemented The data source does not support differences.\n\
+  isc.datasrc.Error Other operational errors at the data source level.\n\
+  SystemError An unexpected error in the backend C++ code.  Either a rare\n\
+              system error such as memory exhaustion or an implementation bug.\n\
+\n\
+Parameters:\n\
+  zone       The name of the zone for which the difference should be\n\
+             retrieved.\n\
+  begin_serial The SOA serial of the beginning version of the\n\
+             differences.\n\
+  end_serial The SOA serial of the ending version of the differences.\n\
+\n\
+Return Value(s): A pair of result code and a ZoneJournalReader object\n\
+(which can be None)\n\
+";
 } // unnamed namespace
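
(A short usage sketch of the call documented above, as seen from Python; the
data source configuration string and the zone/serial values are illustrative
only.)

    import isc.datasrc
    from isc.dns import Name

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "zone.sqlite3"}')
    result, reader = client.get_journal_reader(Name("example.org"),
                                               1234, 1235)
    if result == isc.datasrc.ZoneJournalReader.SUCCESS:
        rrset = reader.get_next_diff()    # begin SOA of the diff sequence
    else:
        # NO_SUCH_ZONE or NO_SUCH_VERSION; reader is None in these cases
        pass
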
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 2740355..bdf84a3 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -38,6 +38,7 @@
 #include "finder_python.h"
 #include "iterator_python.h"
 #include "updater_python.h"
+#include "journal_reader_python.h"
 #include "client_inc.cc"
 
 using namespace std;
@@ -173,6 +174,43 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
     }
 }
 
+PyObject*
+DataSourceClient_getJournalReader(PyObject* po_self, PyObject* args) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
+    PyObject *name_obj;
+    unsigned long begin_obj, end_obj;
+
+    if (PyArg_ParseTuple(args, "O!kk", &name_type, &name_obj,
+                         &begin_obj, &end_obj)) {
+        try {
+            pair<ZoneJournalReader::Result, ZoneJournalReaderPtr> result =
+                self->cppobj->getInstance().getJournalReader(
+                    PyName_ToName(name_obj), static_cast<uint32_t>(begin_obj),
+                    static_cast<uint32_t>(end_obj));
+            PyObject* po_reader;
+            if (result.first == ZoneJournalReader::SUCCESS) {
+                po_reader = createZoneJournalReaderObject(result.second,
+                                                          po_self);
+            } else {
+                po_reader = Py_None;
+                Py_INCREF(po_reader); // this will soon be released
+            }
+            PyObjectContainer container(po_reader);
+            return (Py_BuildValue("(iO)", result.first, container.get()));
+        } catch (const isc::NotImplemented& ex) {
+            PyErr_SetString(getDataSourceException("NotImplemented"),
+                            ex.what());
+        } catch (const DataSourceError& ex) {
+            PyErr_SetString(getDataSourceException("Error"), ex.what());
+        } catch (const std::exception& ex) {
+            PyErr_SetString(PyExc_SystemError, ex.what());
+        } catch (...) {
+            PyErr_SetString(PyExc_SystemError, "Unexpected exception");
+        }
+    }
+    return (NULL);
+}
+
 // This list contains the actual set of functions we have in
 // python. Each entry has
 // 1. Python method name
@@ -180,18 +218,21 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
 // 3. Argument type
 // 4. Documentation
 PyMethodDef DataSourceClient_methods[] = {
-    { "find_zone", reinterpret_cast<PyCFunction>(DataSourceClient_findZone),
-      METH_VARARGS, DataSourceClient_findZone_doc },
+    { "find_zone", DataSourceClient_findZone, METH_VARARGS,
+      DataSourceClient_findZone_doc },
     { "get_iterator",
-      reinterpret_cast<PyCFunction>(DataSourceClient_getIterator), METH_VARARGS,
+      DataSourceClient_getIterator, METH_VARARGS,
       DataSourceClient_getIterator_doc },
-    { "get_updater", reinterpret_cast<PyCFunction>(DataSourceClient_getUpdater),
+    { "get_updater", DataSourceClient_getUpdater,
       METH_VARARGS, DataSourceClient_getUpdater_doc },
+    { "get_journal_reader", DataSourceClient_getJournalReader,
+      METH_VARARGS, DataSourceClient_getJournalReader_doc },
     { NULL, NULL, 0, NULL }
 };
 
 int
-DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
+DataSourceClient_init(PyObject* po_self, PyObject* args, PyObject*) {
+    s_DataSourceClient* self = static_cast<s_DataSourceClient*>(po_self);
     char* ds_type_str;
     char* ds_config_str;
     try {
@@ -236,7 +277,8 @@ DataSourceClient_init(s_DataSourceClient* self, PyObject* args) {
 }
 
 void
-DataSourceClient_destroy(s_DataSourceClient* const self) {
+DataSourceClient_destroy(PyObject* po_self) {
+    s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
     delete self->cppobj;
     self->cppobj = NULL;
     Py_TYPE(self)->tp_free(self);
@@ -255,7 +297,7 @@ PyTypeObject datasourceclient_type = {
     "datasrc.DataSourceClient",
     sizeof(s_DataSourceClient),         // tp_basicsize
     0,                                  // tp_itemsize
-    reinterpret_cast<destructor>(DataSourceClient_destroy),// tp_dealloc
+    DataSourceClient_destroy,           // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -286,7 +328,7 @@ PyTypeObject datasourceclient_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    reinterpret_cast<initproc>(DataSourceClient_init),// tp_init
+    DataSourceClient_init,              // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
diff --git a/src/lib/python/isc/datasrc/datasrc.cc b/src/lib/python/isc/datasrc/datasrc.cc
index 6ab29d8..1573b81 100644
--- a/src/lib/python/isc/datasrc/datasrc.cc
+++ b/src/lib/python/isc/datasrc/datasrc.cc
@@ -27,6 +27,7 @@
 #include "finder_python.h"
 #include "iterator_python.h"
 #include "updater_python.h"
+#include "journal_reader_python.h"
 
 #include <util/python/pycppwrapper_util.h>
 #include <dns/python/pydnspp_common.h>
@@ -192,6 +193,41 @@ initModulePart_ZoneUpdater(PyObject* mod) {
     return (true);
 }
 
+bool
+initModulePart_ZoneJournalReader(PyObject* mod) {
+    if (PyType_Ready(&journal_reader_type) < 0) {
+        return (false);
+    }
+    void* p = &journal_reader_type;
+    if (PyModule_AddObject(mod, "ZoneJournalReader",
+                           static_cast<PyObject*>(p)) < 0) {
+        return (false);
+    }
+    Py_INCREF(&journal_reader_type);
+
+    try {
+        installClassVariable(journal_reader_type, "SUCCESS",
+                             Py_BuildValue("I", ZoneJournalReader::SUCCESS));
+        installClassVariable(journal_reader_type, "NO_SUCH_ZONE",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_ZONE));
+        installClassVariable(journal_reader_type, "NO_SUCH_VERSION",
+                             Py_BuildValue("I",
+                                           ZoneJournalReader::NO_SUCH_VERSION));
+    } catch (const std::exception& ex) {
+        const std::string ex_what =
+            "Unexpected failure in ZoneJournalReader initialization: " +
+            std::string(ex.what());
+        PyErr_SetString(po_IscException, ex_what.c_str());
+        return (false);
+    } catch (...) {
+        PyErr_SetString(PyExc_SystemError,
+            "Unexpected failure in ZoneJournalReader initialization");
+        return (false);
+    }
+
+    return (true);
+}
 
 PyObject* po_DataSourceError;
 PyObject* po_NotImplemented;
@@ -239,6 +275,11 @@ PyInit_datasrc(void) {
         return (NULL);
     }
 
+    if (!initModulePart_ZoneJournalReader(mod)) {
+        Py_DECREF(mod);
+        return (NULL);
+    }
+
     try {
         po_DataSourceError = PyErr_NewException("isc.datasrc.Error", NULL,
                                                 NULL);
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index 4a00e78..82c5fdc 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -46,6 +46,7 @@ Return the RR class of the zone.\n\
 // - Return type: use tuple instead of the dedicated FindResult type
 // - NULL->None
 // - exceptions
+// - description of the 'target' parameter (must be None for now)
 const char* const ZoneFinder_find_doc = "\
 find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
 \n\
@@ -74,6 +75,7 @@ answer for the search key. Specifically,\n\
 - If the target isn't None, all RRsets under the domain are inserted\n\
   there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
   instead of normall processing. This is intended to handle ANY query.\n\
+  (Note: the Python version doesn't support this feature yet)\n\
 \n\
 Note: This behavior is controversial as we discussed in\n\
 https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
@@ -105,8 +107,7 @@ internal error in the datasource.\n\
 Parameters:\n\
   name       The domain name to be searched for.\n\
   type       The RR type to be searched for.\n\
-  target     If target is not None, insert all RRs under the domain\n\
-             into it.\n\
+  target     Must be None.\n\
   options    The search options.\n\
 \n\
 Return Value(s): A tuple of a result code (integer) and an RRset object\n\
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 6585049..7f74133 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -53,26 +53,29 @@ namespace isc_datasrc_internal {
 PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
     if (finder == NULL) {
         PyErr_SetString(getDataSourceException("Error"),
-                        "Internal error in find() wrapper; finder object NULL");
+                        "Internal error in find() wrapper; "
+                        "finder object NULL");
         return (NULL);
     }
-    PyObject *name;
-    PyObject *rrtype;
-    PyObject *target;
-    int options_int;
-    if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+    PyObject* name;
+    PyObject* rrtype;
+    PyObject* target = Py_None;
+    unsigned int options_int = ZoneFinder::FIND_DEFAULT;
+    if (PyArg_ParseTuple(args, "O!O!|OI", &name_type, &name,
                                          &rrtype_type, &rrtype,
                                          &target, &options_int)) {
         try {
+            if (target != Py_None) {
+                PyErr_SetString(PyExc_TypeError,
+                                "find(): target must be None in this version");
+                return (NULL);
+            }
             ZoneFinder::FindOptions options =
                 static_cast<ZoneFinder::FindOptions>(options_int);
-            ZoneFinder::FindResult find_result(
-                finder->find(PyName_ToName(name),
-                                   PyRRType_ToRRType(rrtype),
-                                   NULL,
-                                   options
-                                   ));
-            ZoneFinder::Result r = find_result.code;
+            const ZoneFinder::FindResult find_result(
+                finder->find(PyName_ToName(name), PyRRType_ToRRType(rrtype),
+                             NULL, options));
+            const ZoneFinder::Result r = find_result.code;
             isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
             if (rrsp) {
                 // Use N instead of O so the refcount isn't increased twice
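
(With the parse format changed to "O!O!|OI", the 'target' and 'options'
arguments become genuinely optional on the Python side, and a non-None target
is rejected with TypeError.  A sketch of the resulting call forms, assuming
the finder comes from find_zone(); the database file and names are
illustrative.)

    import isc.datasrc
    import isc.dns

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "zone.sqlite3"}')
    result, finder = client.find_zone(isc.dns.Name("example.com"))

    # Both forms are now accepted; 'target' may only be None for now
    result, rrset = finder.find(isc.dns.Name("www.example.com"),
                                isc.dns.RRType.A())
    result, rrset = finder.find(isc.dns.Name("www.example.com"),
                                isc.dns.RRType.A(), None,
                                isc.datasrc.ZoneFinder.FIND_DEFAULT)
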
diff --git a/src/lib/python/isc/datasrc/journal_reader_inc.cc b/src/lib/python/isc/datasrc/journal_reader_inc.cc
new file mode 100644
index 0000000..35ba70e
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_inc.cc
@@ -0,0 +1,80 @@
+namespace {
+const char* const ZoneJournalReader_doc = "\
+The base class for retrieving differences between two versions of a\n\
+zone.\n\
+\n\
+On construction, each derived class object will internally set up\n\
+retrieving sequences of differences between two specific versions of a\n\
+specific zone managed in a particular data source. So the constructor\n\
+of a derived class would normally take parameters to identify the zone\n\
+and the two versions for which the differences should be retrieved.\n\
+See DataSourceClient.get_journal_reader for more concrete details used\n\
+in this API.\n\
+\n\
+Once constructed, an object of this class will act like an iterator\n\
+over the sequences. Every time the get_next_diff() method is called it\n\
+returns one element of the differences in the form of an RRset until\n\
+it reaches the end of the entire set of sequences.\n\
+\n\
+";
+
+// Modifications from C++ doc:
+//   ConstRRsetPtr -> RRset
+//   Null -> None
+//   InvalidOperation -> ValueError
+const char* const ZoneJournalReader_getNextDiff_doc = "\
+get_next_diff() -> isc.dns.RRset\n\
+\n\
+Return the next difference RR of difference sequences.\n\
+\n\
+In this API, the difference between two versions of a zone is\n\
+conceptually represented as IXFR-style difference sequences: Each\n\
+difference sequence is a sequence of RRs: an older version of SOA (to\n\
+be deleted), zero or more other deleted RRs, the post-transaction SOA\n\
+(to be added), and zero or more other added RRs. (Note, however, that\n\
+the underlying data source implementation may or may not represent the\n\
+difference in straightforward realization of this concept. The mapping\n\
+between the conceptual difference and the actual implementation is\n\
+hidden in each derived class).\n\
+\n\
+This method provides an application with a higher level interface to\n\
+retrieve the differences following the conceptual model: the\n\
+ZoneJournalReader object iterates over the entire sequences from the\n\
+beginning SOA (which is to be deleted) to the ending SOA (which is to\n\
+be added), and each call to this method returns one RR, in the order\n\
+of the sequences, in the form of an RRset that contains exactly one\n\
+RDATA.\n\
+\n\
+Note that the ordering of the sequences specifies the semantics of\n\
+each difference: add or delete. For example, the first RR is to be\n\
+deleted, and the last RR is to be added. So the return value of this\n\
+method does not explicitly indicate whether the RR is to be added or\n\
+deleted.\n\
+\n\
+This method ensures the returned RRset represents an RR, that is, it\n\
+contains exactly one RDATA. However, it does not necessarily ensure\n\
+that the resulting sequences are in IXFR-style form. For example, the\n\
+first RR is supposed to be an SOA, and this should normally be the\n\
+case, but this interface does not require the derived class\n\
+implementation to ensure it. Normally the differences are expected to\n\
+be stored using this API (via a ZoneUpdater object), and as long as\n\
+that is the case and the underlying implementation follows the\n\
+requirements of the API, the result of this method should be a valid\n\
+IXFR-style sequence. So this API does not mandate this almost\n\
+redundant check as part of the interface. If the application needs to\n\
+be 100% sure, it must check the resulting sequence itself.\n\
+\n\
+Once the object reaches the end of the sequences, this method returns\n\
+None. Any subsequent call will result in an exception of class\n\
+ValueError.\n\
+\n\
+Exceptions:\n\
+  ValueError The method is called beyond the end of the\n\
+             difference sequences.\n\
+  isc.datasrc.Error Underlying data is broken and the RR cannot be\n\
+             created or other low level data source error.\n\
+\n\
+Return Value(s): An RRset that contains one RDATA corresponding to the\n\
+next difference in the sequences.\n\
+";
+} // unnamed namespace
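
(Putting get_journal_reader() and get_next_diff() together, a sketch of the
intended retrieval loop; the configuration and serials are illustrative.  The
wrapper in journal_reader_python.cc below also implements the iterator
protocol, so 'for rrset in reader:' walks the same sequence.)

    import isc.datasrc
    from isc.dns import Name

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{"database_file": "zone.sqlite3"}')
    result, reader = client.get_journal_reader(Name("example.org"),
                                               1234, 1300)
    if result == isc.datasrc.ZoneJournalReader.SUCCESS:
        rrset = reader.get_next_diff()
        while rrset is not None:          # None marks the end of the diffs
            print(rrset.to_text())
            rrset = reader.get_next_diff()
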
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.cc b/src/lib/python/isc/datasrc/journal_reader_python.cc
new file mode 100644
index 0000000..ff398d1
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.cc
@@ -0,0 +1,200 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+// Python.h needs to be placed at the head of the program file, see:
+// http://docs.python.org/py3k/extending/extending.html#a-simple-example
+#include <Python.h>
+
+#include <util/python/pycppwrapper_util.h>
+
+#include <datasrc/client.h>
+#include <datasrc/database.h>
+
+#include <dns/python/rrset_python.h>
+
+#include "datasrc.h"
+#include "journal_reader_python.h"
+
+#include "journal_reader_inc.cc"
+
+using namespace isc::util::python;
+using namespace isc::dns::python;
+using namespace isc::datasrc;
+using namespace isc::datasrc::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_ZoneJournalReader : public PyObject {
+public:
+    s_ZoneJournalReader() : cppobj(ZoneJournalReaderPtr()), base_obj(NULL) {};
+    ZoneJournalReaderPtr cppobj;
+    // This is a reference to a base object; if the object of this class
+    // depends on another object to be in scope during its lifetime,
+    // we use INCREF the base object upon creation, and DECREF it at
+    // the end of the destructor
+    // This is an optional argument to createXXX(). If NULL, it is ignored.
+    PyObject* base_obj;
+};
+
+// General creation and destruction
+int
+ZoneJournalReader_init(PyObject*, PyObject*, PyObject*) {
+    // can't be called directly
+    PyErr_SetString(PyExc_TypeError,
+                    "ZoneJournalReader cannot be constructed directly");
+
+    return (-1);
+}
+
+void
+ZoneJournalReader_destroy(PyObject* po_self) {
+    s_ZoneJournalReader* const self =
+        static_cast<s_ZoneJournalReader*>(po_self);
+    // cppobj is a shared ptr, but to make sure things are not destroyed in
+    // the wrong order, we reset it here.
+    self->cppobj.reset();
+    if (self->base_obj != NULL) {
+        Py_DECREF(self->base_obj);
+    }
+    Py_TYPE(self)->tp_free(self);
+}
+
+//
+// We declare the functions here, the definitions are below
+// the type definition of the object, since both can use the other
+//
+PyObject*
+ZoneJournalReader_getNextDiff(PyObject* po_self, PyObject*) {
+    s_ZoneJournalReader* self = static_cast<s_ZoneJournalReader*>(po_self);
+    try {
+        isc::dns::ConstRRsetPtr rrset = self->cppobj->getNextDiff();
+        if (!rrset) {
+            Py_RETURN_NONE;
+        }
+        return (createRRsetObject(*rrset));
+    } catch (const isc::InvalidOperation& ex) {
+        PyErr_SetString(PyExc_ValueError, ex.what());
+        return (NULL);
+    } catch (const isc::Exception& isce) {
+        PyErr_SetString(getDataSourceException("Error"), isce.what());
+        return (NULL);
+    } catch (const std::exception& exc) {
+        PyErr_SetString(getDataSourceException("Error"), exc.what());
+        return (NULL);
+    } catch (...) {
+        PyErr_SetString(getDataSourceException("Error"),
+                        "Unexpected exception");
+        return (NULL);
+    }
+}
+
+PyObject*
+ZoneJournalReader_iter(PyObject *self) {
+    Py_INCREF(self);
+    return (self);
+}
+
+PyObject*
+ZoneJournalReader_next(PyObject* self) {
+    PyObject* result = ZoneJournalReader_getNextDiff(self, NULL);
+    // iter_next must return NULL without error instead of Py_None
+    if (result == Py_None) {
+        Py_DECREF(result);
+        return (NULL);
+    } else {
+        return (result);
+    }
+}
+
+PyMethodDef ZoneJournalReader_methods[] = {
+    { "get_next_diff", ZoneJournalReader_getNextDiff, METH_NOARGS,
+      ZoneJournalReader_getNextDiff_doc },
+    { NULL, NULL, 0, NULL }
+};
+
+
+} // end of unnamed namespace
+
+namespace isc {
+namespace datasrc {
+namespace python {
+PyTypeObject journal_reader_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "datasrc.ZoneJournalReader",
+    sizeof(s_ZoneJournalReader),        // tp_basicsize
+    0,                                  // tp_itemsize
+    ZoneJournalReader_destroy,          // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    NULL,                               // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    NULL,                               // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    ZoneJournalReader_doc,
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    NULL,                               // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    ZoneJournalReader_iter,             // tp_iter
+    ZoneJournalReader_next,             // tp_iternext
+    ZoneJournalReader_methods,          // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    ZoneJournalReader_init,             // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createZoneJournalReaderObject(ZoneJournalReaderPtr source,
+                              PyObject* base_obj)
+{
+    s_ZoneJournalReader* po = static_cast<s_ZoneJournalReader*>(
+        journal_reader_type.tp_alloc(&journal_reader_type, 0));
+    if (po != NULL) {
+        po->cppobj = source;
+        po->base_obj = base_obj;
+        if (base_obj != NULL) {
+            Py_INCREF(base_obj);
+        }
+    }
+    return (po);
+}
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
diff --git a/src/lib/python/isc/datasrc/journal_reader_python.h b/src/lib/python/isc/datasrc/journal_reader_python.h
new file mode 100644
index 0000000..56344df
--- /dev/null
+++ b/src/lib/python/isc/datasrc/journal_reader_python.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_DATASRC_JOURNAL_READER_H
+#define __PYTHON_DATASRC_JOURNAL_READER_H 1
+
+#include <Python.h>
+
+#include <datasrc/zone.h>
+
+namespace isc {
+namespace datasrc {
+namespace python {
+
+extern PyTypeObject journal_reader_type;
+
+/// \brief Create a ZoneJournalReader python object
+///
+/// \param source The zone journal reader pointer to wrap
+/// \param base_obj An optional PyObject that this ZoneJournalReader depends on.
+///                 Its refcount is increased, and will be decreased when
+///                 this reader is destroyed, making sure that the
+///                 base object is never destroyed before this reader.
+PyObject* createZoneJournalReaderObject(
+    isc::datasrc::ZoneJournalReaderPtr source,
+    PyObject* base_obj = NULL);
+
+
+} // namespace python
+} // namespace datasrc
+} // namespace isc
+#endif // __PYTHON_DATASRC_JOURNAL_READER_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/python/isc/datasrc/sqlite3_ds.py b/src/lib/python/isc/datasrc/sqlite3_ds.py
index fd63741..daa12fc 100644
--- a/src/lib/python/isc/datasrc/sqlite3_ds.py
+++ b/src/lib/python/isc/datasrc/sqlite3_ds.py
@@ -72,6 +72,14 @@ def create(cur):
                     rdtype STRING NOT NULL COLLATE NOCASE,
                     rdata STRING NOT NULL)""")
         cur.execute("CREATE INDEX nsec3_byhash ON nsec3 (hash)")
+        cur.execute("""CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    version INTEGER NOT NULL,
+                    operation INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rrtype STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdata STRING NOT NULL)""")
         row = [1]
     cur.execute("COMMIT TRANSACTION")
     return row
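
(For reference, the rows stored in the new diffs table can be inspected
directly with the standard sqlite3 module; the database file name and zone_id
below are illustrative, and ZoneJournalReader remains the normal way to read
the differences.)

    import sqlite3

    conn = sqlite3.connect("zone.sqlite3")
    cur = conn.cursor()
    # One row per RR of the difference sequences, in insertion order
    cur.execute("SELECT version, operation, name, rrtype, ttl, rdata "
                "FROM diffs WHERE zone_id = ? ORDER BY id", (1,))
    for row in cur:
        print(row)
    conn.close()
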
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 411b5cc..ab89b93 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -6,6 +6,7 @@ EXTRA_DIST = $(PYTESTS)
 
 EXTRA_DIST += testdata/brokendb.sqlite3
 EXTRA_DIST += testdata/example.com.sqlite3
+EXTRA_DIST += testdata/test.sqlite3.nodiffs
 CLEANFILES = $(abs_builddir)/rwtest.sqlite3.copied
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
@@ -33,5 +34,6 @@ endif
 	PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	TESTDATA_WRITE_PATH=$(abs_builddir) \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 02020e2..3e4a1d7 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -15,8 +15,9 @@
 
 import isc.log
 import isc.datasrc
-from isc.datasrc import ZoneFinder
+from isc.datasrc import ZoneFinder, ZoneJournalReader
 from isc.dns import *
+from isc.testutils.rrset_utils import rrsets_equal
 import unittest
 import sqlite3
 import os
@@ -40,19 +41,6 @@ def add_rrset(rrset_list, name, rrclass, rrtype, ttl, rdatas):
             rrset_to_add.add_rdata(isc.dns.Rdata(rrtype, rrclass, rdata))
     rrset_list.append(rrset_to_add)
 
-# helper function, we have no direct rrset comparison atm
-def rrsets_equal(a, b):
-    # no accessor for sigs either (so this only checks name, class, type, ttl,
-    # and rdata)
-    # also, because of the fake data in rrsigs, if the type is rrsig, the
-    # rdata is not checked
-    return a.get_name() == b.get_name() and\
-           a.get_class() == b.get_class() and\
-           a.get_type() == b.get_type() and \
-           a.get_ttl() == b.get_ttl() and\
-           (a.get_type() == isc.dns.RRType.RRSIG() or
-            sorted(a.get_rdata()) == sorted(b.get_rdata()))
-
 # returns true if rrset is in expected_rrsets
 # will remove the rrset from expected_rrsets if found
 def check_for_rrset(expected_rrsets, rrset):
@@ -62,6 +50,13 @@ def check_for_rrset(expected_rrsets, rrset):
             return True
     return False
 
+def create_soa(serial):
+    soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+    soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                        'ns1.example.org. admin.example.org. ' +
+                        str(serial) + ' 3600 1800 2419200 7200'))
+    return soa
+
 class DataSrcClient(unittest.TestCase):
 
     def test_(self):
@@ -290,6 +285,24 @@ class DataSrcClient(unittest.TestCase):
         self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
                          rrset.to_text())
 
+        # Check that the optional parameters can be omitted
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A())
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(), None)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for the "target"
+        self.assertRaises(TypeError, finder.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), True)
+
         result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
                                     isc.dns.RRType.A(),
                                     None,
@@ -390,6 +403,36 @@ class DataSrcUpdater(unittest.TestCase):
         # can't construct directly
         self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
 
+    def test_update_finder(self):
+        # Check basic behavior of updater's finder
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(),
+                                     None,
+                                     ZoneFinder.FIND_DEFAULT)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Omit optional parameters
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A())
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(), None)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for 'target'
+        self.assertRaises(TypeError, updater.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), 1)
+
     def test_update_delete_commit(self):
 
         dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
@@ -606,14 +649,6 @@ class JournalWrite(unittest.TestCase):
             self.assertEqual(expected, actual)
         conn.close()
 
-    def create_soa(self, serial):
-        soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(),
-                    RRTTL(3600))
-        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
-                            'ns1.example.org. admin.example.org. ' +
-                            str(serial) + ' 3600 1800 2419200 7200'))
-        return soa
-
     def create_a(self, address):
         a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
                      RRTTL(3600))
@@ -624,9 +659,9 @@ class JournalWrite(unittest.TestCase):
         # This is a straightforward port of the C++ 'journal' test
         # Note: we add/delete 'out of zone' data (example.org in the
         # example.com zone for convenience.
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.updater.delete_rrset(self.create_a('192.0.2.2'))
-        self.updater.add_rrset(self.create_soa(1235))
+        self.updater.add_rrset(create_soa(1235))
         self.updater.add_rrset(self.create_a('192.0.2.2'))
         self.updater.commit()
 
@@ -645,11 +680,11 @@ class JournalWrite(unittest.TestCase):
         # This is a straightforward port of the C++ 'journalMultiple' test
         expected = []
         for i in range(1, 100):
-            self.updater.delete_rrset(self.create_soa(1234 + i - 1))
+            self.updater.delete_rrset(create_soa(1234 + i - 1))
             expected.append(("example.org.", "SOA", 3600,
                              "ns1.example.org. admin.example.org. " +
                              str(1234 + i - 1) + " 3600 1800 2419200 7200"))
-            self.updater.add_rrset(self.create_soa(1234 + i))
+            self.updater.add_rrset(create_soa(1234 + i))
             expected.append(("example.org.", "SOA", 3600,
                              "ns1.example.org. admin.example.org. " +
                              str(1234 + i) + " 3600 1800 2419200 7200"))
@@ -665,27 +700,27 @@ class JournalWrite(unittest.TestCase):
         # Add before delete
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
-                          self.create_soa(1234))
+                          create_soa(1234))
         # Add A before SOA
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
                           self.create_a('192.0.2.1'))
         # Commit before add
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.commit)
         # Delete two SOAs
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(create_soa(1234))
         self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
-                          self.create_soa(1235))
+                          create_soa(1235))
         # Add two SOAs
         self.updater = self.dsc.get_updater(Name("example.com"), False, True)
-        self.updater.delete_rrset(self.create_soa(1234))
-        self.updater.add_rrset(self.create_soa(1235))
+        self.updater.delete_rrset(create_soa(1234))
+        self.updater.add_rrset(create_soa(1235))
         self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
-                          self.create_soa(1236))
+                          create_soa(1236))
 
     def test_journal_write_onerase(self):
         self.updater = None
@@ -700,6 +735,119 @@ class JournalWrite(unittest.TestCase):
         self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
                           1, True)
 
+class JournalRead(unittest.TestCase):
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        self.zname = Name('example.com')
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+        self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+                                                WRITE_ZONE_DB_CONFIG)
+        self.reader = None
+
+    def tearDown(self):
+        # Some tests leave the reader in the middle of a sequence, holding
+        # the lock.  Since the unittest framework keeps each test object
+        # until the end of the entire test run, we need to make sure the
+        # reader is released at the end of each test.  The client shouldn't
+        # do any harm, but we clean it up, too, just in case.
+        self.dsc = None
+        self.reader = None
+
+    def make_simple_diff(self, begin_soa):
+        updater = self.dsc.get_updater(self.zname, False, True)
+        updater.delete_rrset(begin_soa)
+        updater.add_rrset(create_soa(1235))
+        updater.commit()
+
+    def test_journal_reader(self):
+        # This is a straightforward port of the C++ 'journalReader' test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1234,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.SUCCESS, result)
+        self.assertNotEqual(None, self.reader)
+        rrsets_equal(create_soa(1234), self.reader.get_next_diff())
+        rrsets_equal(create_soa(1235), self.reader.get_next_diff())
+        self.assertEqual(None, self.reader.get_next_diff())
+        self.assertRaises(ValueError, self.reader.get_next_diff)
+
+    def test_journal_reader_with_large_serial(self):
+        # similar to the previous one, but use a very large serial to check
+        # if the python wrapper code has unexpected integer overflow
+        self.make_simple_diff(create_soa(4294967295))
+        result, self.reader = self.dsc.get_journal_reader(self.zname,
+                                                          4294967295, 1235)
+        self.assertNotEqual(None, self.reader)
+        # dump to text and compare them in case create_soa happens to have
+        # an overflow bug
+        self.assertEqual('example.org. 3600 IN SOA ns1.example.org. ' + \
+                         'admin.example.org. 4294967295 3600 1800 ' + \
+                             '2419200 7200\n',
+                         self.reader.get_next_diff().to_text())
+
+    def test_journal_reader_large_journal(self):
+        # This is a straightforward port of the C++ 'readLargeJournal' test.
+        # In this test we use the ZoneJournalReader object as a Python
+        # iterator.
+        updater = self.dsc.get_updater(self.zname, False, True)
+        expected = []
+        for i in range(0, 100):
+            rrset = create_soa(1234 + i)
+            updater.delete_rrset(rrset)
+            expected.append(rrset)
+
+            rrset = create_soa(1234 + i + 1)
+            updater.add_rrset(rrset)
+            expected.append(rrset)
+
+        updater.commit()
+        _, self.reader = self.dsc.get_journal_reader(self.zname, 1234, 1334)
+        self.assertNotEqual(None, self.reader)
+        i = 0
+        for rr in self.reader:
+            self.assertNotEqual(len(expected), i)
+            rrsets_equal(expected[i], rr)
+            i += 1
+        self.assertEqual(len(expected), i)
+
+    def test_journal_reader_no_range(self):
+        # This is a straightforward port of the C++ 'readJournalForNoRange'
+        # test
+        self.make_simple_diff(create_soa(1234))
+        result, self.reader = self.dsc.get_journal_reader(self.zname, 1200,
+                                                          1235)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_VERSION, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_no_zone(self):
+        # This is a straightforward port of the C++ 'journalReaderForNXZone'
+        # test
+        result, self.reader = self.dsc.get_journal_reader(Name('nosuchzone'),
+                                                          0, 1)
+        self.assertEqual(ZoneJournalReader.NO_SUCH_ZONE, result)
+        self.assertEqual(None, self.reader)
+
+    def test_journal_reader_bad_params(self):
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          'example.com.', 0, 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 'must be int', 1)
+        self.assertRaises(TypeError, self.dsc.get_journal_reader,
+                          self.zname, 0, 'must be int')
+
+    def test_journal_reader_direct_construct(self):
+        # ZoneJournalReader can only be constructed via a factory
+        self.assertRaises(TypeError, ZoneJournalReader)
+
+    def test_journal_reader_old_schema(self):
+        # The database doesn't have a "diffs" table.
+        dbfile = TESTDATA_PATH + 'test.sqlite3.nodiffs'
+        client = isc.datasrc.DataSourceClient("sqlite3",
+                                              "{ \"database_file\": \"" + \
+                                                  dbfile + "\" }")
+        self.assertRaises(isc.datasrc.Error, client.get_journal_reader,
+                          self.zname, 0, 1)
+
 if __name__ == "__main__":
     isc.log.init("bind10")
     isc.log.resetUnitTestRootLogger()
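
For reference, here is a minimal sketch of how the journal reader exercised by the
JournalRead tests above might be driven from application code.  It only uses calls
that appear in the tests (get_journal_reader(), the SUCCESS/NO_SUCH_VERSION results,
iteration over the reader); the sqlite3 config string, zone name and serial numbers
are placeholder values, and ZoneJournalReader is assumed to be exported by
isc.datasrc as the tests' bare references suggest:

    # Sketch only, not part of the patch.
    import isc.datasrc
    from isc.datasrc import ZoneJournalReader
    from isc.dns import Name

    client = isc.datasrc.DataSourceClient(
        "sqlite3", '{ "database_file": "zone.sqlite3" }')
    result, reader = client.get_journal_reader(Name('example.com'), 1234, 1235)
    if result == ZoneJournalReader.SUCCESS:
        # The reader works as a Python iterator over the diff sequence.
        for rrset in reader:
            print(rrset.to_text())
    elif result == ZoneJournalReader.NO_SUCH_VERSION:
        # The requested range is not in the journal; an IXFR server would
        # fall back to a full (AXFR-style) transfer here.
        pass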
diff --git a/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs
new file mode 100644
index 0000000..cc8cfc3
Binary files /dev/null and b/src/lib/python/isc/datasrc/tests/testdata/test.sqlite3.nodiffs differ
diff --git a/src/lib/python/isc/log/log.cc b/src/lib/python/isc/log/log.cc
index c7112b3..2e4a28f 100644
--- a/src/lib/python/isc/log/log.cc
+++ b/src/lib/python/isc/log/log.cc
@@ -303,7 +303,8 @@ public:
 extern PyTypeObject logger_type;
 
 int
-Logger_init(LoggerWrapper* self, PyObject* args) {
+Logger_init(PyObject* po_self, PyObject* args, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* name;
     if (!PyArg_ParseTuple(args, "s", &name)) {
         return (-1);
@@ -323,7 +324,9 @@ Logger_init(LoggerWrapper* self, PyObject* args) {
 }
 
 void
-Logger_destroy(LoggerWrapper* const self) {
+Logger_destroy(PyObject* po_self) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     delete self->logger_;
     self->logger_ = NULL;
     Py_TYPE(self)->tp_free(self);
@@ -351,7 +354,8 @@ severityToText(const Severity& severity) {
 }
 
 PyObject*
-Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveSeverity(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("s",
                               severityToText(
@@ -368,7 +372,8 @@ Logger_getEffectiveSeverity(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
+Logger_getEffectiveDebugLevel(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     try {
         return (Py_BuildValue("i", self->logger_->getEffectiveDebugLevel()));
     }
@@ -383,7 +388,8 @@ Logger_getEffectiveDebugLevel(LoggerWrapper* self, PyObject*) {
 }
 
 PyObject*
-Logger_setSeverity(LoggerWrapper* self, PyObject* args) {
+Logger_setSeverity(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     const char* severity;
     int dbgLevel = 0;
     if (!PyArg_ParseTuple(args, "z|i", &severity, &dbgLevel)) {
@@ -425,27 +431,32 @@ Logger_isLevelEnabled(LoggerWrapper* self, FPtr function) {
 }
 
 PyObject*
-Logger_isInfoEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isInfoEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isInfoEnabled));
 }
 
 PyObject*
-Logger_isWarnEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isWarnEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isWarnEnabled));
 }
 
 PyObject*
-Logger_isErrorEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isErrorEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isErrorEnabled));
 }
 
 PyObject*
-Logger_isFatalEnabled(LoggerWrapper* self, PyObject*) {
+Logger_isFatalEnabled(PyObject* po_self, PyObject*) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_isLevelEnabled(self, &Logger::isFatalEnabled));
 }
 
 PyObject*
-Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
+Logger_isDebugEnabled(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     int level = MIN_DEBUG_LEVEL;
     if (!PyArg_ParseTuple(args, "|i", &level)) {
         return (NULL);
@@ -470,53 +481,39 @@ Logger_isDebugEnabled(LoggerWrapper* self, PyObject* args) {
 
 string
 objectToStr(PyObject* object, bool convert) {
-    PyObject* cleanup(NULL);
+    PyObjectContainer objstr_container;
     if (convert) {
-        object = cleanup = PyObject_Str(object);
-        if (object == NULL) {
+        PyObject* text_obj = PyObject_Str(object);
+        if (text_obj == NULL) {
+            // PyObject_Str could fail for various reasons, including because
+            // the object cannot be converted to a string.  We exit with
+            // InternalError to preserve the PyErr set in PyObject_Str.
             throw InternalError();
         }
-    }
-    const char* value;
-    PyObject* tuple(Py_BuildValue("(O)", object));
-    if (tuple == NULL) {
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
-        throw InternalError();
+        objstr_container.reset(text_obj);
+        object = objstr_container.get();
     }
 
-    if (!PyArg_ParseTuple(tuple, "s", &value)) {
-        Py_DECREF(tuple);
-        if (cleanup != NULL) {
-            Py_DECREF(cleanup);
-        }
+    PyObjectContainer tuple_container(Py_BuildValue("(O)", object));
+    const char* value;
+    if (!PyArg_ParseTuple(tuple_container.get(), "s", &value)) {
         throw InternalError();
     }
-    string result(value);
-    Py_DECREF(tuple);
-    if (cleanup != NULL) {
-        Py_DECREF(cleanup);
-    }
-    return (result);
+    return (string(value));
 }
 
 // Generic function to output the logging message. Called by the real functions.
-template<class Function>
+template <class Function>
 PyObject*
 Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
     try {
-        Py_ssize_t number(PyObject_Length(args));
+        const Py_ssize_t number(PyObject_Length(args));
         if (number < 0) {
             return (NULL);
         }
 
         // Which argument is the first to format?
-        size_t start(1);
-        if (dbgLevel) {
-            start ++;
-        }
-
+        const size_t start = dbgLevel ? 2 : 1;
         if (number < start) {
             return (PyErr_Format(PyExc_TypeError, "Too few arguments to "
                                  "logging call, at least %zu needed and %zd "
@@ -524,18 +521,10 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         }
 
         // Extract the fixed arguments
-        PyObject *midO(PySequence_GetItem(args, start - 1));
-        if (midO == NULL) {
-            return (NULL);
-        }
-        string mid(objectToStr(midO, false));
         long dbg(0);
         if (dbgLevel) {
-            PyObject *dbgO(PySequence_GetItem(args, 0));
-            if (dbgO == NULL) {
-                return (NULL);
-            }
-            dbg = PyLong_AsLong(dbgO);
+            PyObjectContainer dbg_container(PySequence_GetItem(args, 0));
+            dbg = PyLong_AsLong(dbg_container.get());
             if (PyErr_Occurred()) {
                 return (NULL);
             }
@@ -544,16 +533,16 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
         // We create the logging message right now. If we fail to convert a
         // parameter to string, at least the part that we already did will
         // be output
+        PyObjectContainer msgid_container(PySequence_GetItem(args, start - 1));
+        const string mid(objectToStr(msgid_container.get(), false));
         Logger::Formatter formatter(function(dbg, mid.c_str()));
 
         // Now process the rest of parameters, convert each to string and put
         // into the formatter. It will print itself in the end.
         for (size_t i(start); i < number; ++ i) {
-            PyObject* param(PySequence_GetItem(args, i));
-            if (param == NULL) {
-                return (NULL);
-            }
-            formatter = formatter.arg(objectToStr(param, true));
+            PyObjectContainer param_container(PySequence_GetItem(args, i));
+            formatter = formatter.arg(objectToStr(param_container.get(),
+                                                  true));
         }
         Py_RETURN_NONE;
     }
@@ -573,72 +562,74 @@ Logger_performOutput(Function function, PyObject* args, bool dbgLevel) {
 // Now map the functions into the performOutput. I wish C++ could do
 // functional programming.
 PyObject*
-Logger_debug(LoggerWrapper* self, PyObject* args) {
+Logger_debug(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::debug, self->logger_, _1, _2),
                                  args, true));
 }
 
 PyObject*
-Logger_info(LoggerWrapper* self, PyObject* args) {
+Logger_info(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::info, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_warn(LoggerWrapper* self, PyObject* args) {
+Logger_warn(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::warn, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_error(LoggerWrapper* self, PyObject* args) {
+Logger_error(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::error, self->logger_, _2),
                                  args, false));
 }
 
 PyObject*
-Logger_fatal(LoggerWrapper* self, PyObject* args) {
+Logger_fatal(PyObject* po_self, PyObject* args) {
+    LoggerWrapper* self = static_cast<LoggerWrapper*>(po_self);
     return (Logger_performOutput(bind(&Logger::fatal, self->logger_, _2),
                                  args, false));
 }
 
 PyMethodDef loggerMethods[] = {
-    { "get_effective_severity",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveSeverity),
-        METH_NOARGS, "Returns the effective logging severity as string" },
-    { "get_effective_debug_level",
-        reinterpret_cast<PyCFunction>(Logger_getEffectiveDebugLevel),
-        METH_NOARGS, "Returns the current debug level." },
-    { "set_severity",
-        reinterpret_cast<PyCFunction>(Logger_setSeverity), METH_VARARGS,
+    { "get_effective_severity", Logger_getEffectiveSeverity, METH_NOARGS,
+        "Returns the effective logging severity as string" },
+    { "get_effective_debug_level", Logger_getEffectiveDebugLevel, METH_NOARGS,
+        "Returns the current debug level." },
+    { "set_severity", Logger_setSeverity, METH_VARARGS,
         "Sets the severity of a logger. The parameters are severity as a "
         "string and, optionally, a debug level (integer in range 0-99). "
         "The severity may be NULL, in which case an inherited value is taken."
     },
-    { "is_debug_enabled", reinterpret_cast<PyCFunction>(Logger_isDebugEnabled),
-        METH_VARARGS, "Returns if the logger would log debug message now. "
+    { "is_debug_enabled", Logger_isDebugEnabled, METH_VARARGS,
+      "Returns if the logger would log debug message now. "
             "You can provide a desired debug level." },
-    { "is_info_enabled", reinterpret_cast<PyCFunction>(Logger_isInfoEnabled),
-        METH_NOARGS, "Returns if the logger would log info message now." },
-    { "is_warn_enabled", reinterpret_cast<PyCFunction>(Logger_isWarnEnabled),
-        METH_NOARGS, "Returns if the logger would log warn message now." },
-    { "is_error_enabled", reinterpret_cast<PyCFunction>(Logger_isErrorEnabled),
-        METH_NOARGS, "Returns if the logger would log error message now." },
-    { "is_fatal_enabled", reinterpret_cast<PyCFunction>(Logger_isFatalEnabled),
-        METH_NOARGS, "Returns if the logger would log fatal message now." },
-    { "debug", reinterpret_cast<PyCFunction>(Logger_debug), METH_VARARGS,
+    { "is_info_enabled", Logger_isInfoEnabled, METH_NOARGS,
+      "Returns if the logger would log info message now." },
+    { "is_warn_enabled", Logger_isWarnEnabled, METH_NOARGS,
+      "Returns if the logger would log warn message now." },
+    { "is_error_enabled", Logger_isErrorEnabled, METH_NOARGS,
+      "Returns if the logger would log error message now." },
+    { "is_fatal_enabled", Logger_isFatalEnabled, METH_NOARGS,
+      "Returns if the logger would log fatal message now." },
+    { "debug", Logger_debug, METH_VARARGS,
         "Logs a debug-severity message. It takes the debug level, message ID "
         "and any number of stringifiable arguments to the message." },
-    { "info", reinterpret_cast<PyCFunction>(Logger_info), METH_VARARGS,
+    { "info", Logger_info, METH_VARARGS,
         "Logs a info-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "warn", reinterpret_cast<PyCFunction>(Logger_warn), METH_VARARGS,
+    { "warn", Logger_warn, METH_VARARGS,
         "Logs a warn-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "error", reinterpret_cast<PyCFunction>(Logger_error), METH_VARARGS,
+    { "error", Logger_error, METH_VARARGS,
         "Logs a error-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
-    { "fatal", reinterpret_cast<PyCFunction>(Logger_fatal), METH_VARARGS,
+    { "fatal", Logger_fatal, METH_VARARGS,
         "Logs a fatal-severity message. It taskes the message ID and any "
         "number of stringifiable arguments to the message." },
     { NULL, NULL, 0, NULL }
@@ -649,7 +640,7 @@ PyTypeObject logger_type = {
     "isc.log.Logger",
     sizeof(LoggerWrapper),                 // tp_basicsize
     0,                                  // tp_itemsize
-    reinterpret_cast<destructor>(Logger_destroy),       // tp_dealloc
+    Logger_destroy,                     // tp_dealloc
     NULL,                               // tp_print
     NULL,                               // tp_getattr
     NULL,                               // tp_setattr
@@ -681,7 +672,7 @@ PyTypeObject logger_type = {
     NULL,                               // tp_descr_get
     NULL,                               // tp_descr_set
     0,                                  // tp_dictoffset
-    reinterpret_cast<initproc>(Logger_init),            // tp_init
+    Logger_init,                        // tp_init
     NULL,                               // tp_alloc
     PyType_GenericNew,                  // tp_new
     NULL,                               // tp_free
@@ -718,21 +709,21 @@ PyInit_log(void) {
         return (NULL);
     }
 
-    if (PyType_Ready(&logger_type) < 0) {
-        return (NULL);
-    }
-
-    if (PyModule_AddObject(mod, "Logger",
-                           static_cast<PyObject*>(static_cast<void*>(
-                               &logger_type))) < 0) {
-        return (NULL);
-    }
-
-    // Add in the definitions of the standard debug levels.  These can then
-    // be referred to in Python through the constants log.DBGLVL_XXX.
+    // Finalize logger class and add in the definitions of the standard debug
+    // levels.  These can then be referred to in Python through the constants
+    // log.DBGLVL_XXX.
     // N.B. These should be kept in sync with the constants defined in
     // log_dbglevels.h.
     try {
+        if (PyType_Ready(&logger_type) < 0) {
+            throw InternalError();
+        }
+        void* p = &logger_type;
+        if (PyModule_AddObject(mod, "Logger",
+                               static_cast<PyObject*>(p)) < 0) {
+            throw InternalError();
+        }
+
         installClassVariable(logger_type, "DBGLVL_START_SHUT",
                              Py_BuildValue("I", DBGLVL_START_SHUT));
         installClassVariable(logger_type, "DBGLVL_COMMAND",
@@ -747,15 +738,20 @@ PyInit_log(void) {
                              Py_BuildValue("I", DBGLVL_TRACE_DETAIL));
         installClassVariable(logger_type, "DBGLVL_TRACE_DETAIL_DATA",
                              Py_BuildValue("I", DBGLVL_TRACE_DETAIL_DATA));
+    } catch (const InternalError&) {
+        Py_DECREF(mod);
+        return (NULL);
     } catch (const std::exception& ex) {
         const std::string ex_what =
             "Unexpected failure in Log initialization: " +
             std::string(ex.what());
         PyErr_SetString(PyExc_SystemError, ex_what.c_str());
+        Py_DECREF(mod);
         return (NULL);
     } catch (...) {
         PyErr_SetString(PyExc_SystemError,
                         "Unexpected failure in Log initialization");
+        Py_DECREF(mod);
         return (NULL);
     }
 
diff --git a/src/lib/python/isc/log/tests/log_test.py b/src/lib/python/isc/log/tests/log_test.py
index 8deaeae..1337654 100644
--- a/src/lib/python/isc/log/tests/log_test.py
+++ b/src/lib/python/isc/log/tests/log_test.py
@@ -17,6 +17,7 @@
 import isc.log
 import unittest
 import json
+import sys
 import bind10_config
 from isc.config.ccsession import path_search
 
@@ -89,6 +90,7 @@ class Logger(unittest.TestCase):
     def setUp(self):
         isc.log.init("root", "DEBUG", 50)
         self.sevs = ['INFO', 'WARN', 'ERROR', 'FATAL']
+        self.TEST_MSG = isc.log.create_message('TEST_MESSAGE', '%1')
 
     # Checks defaults of the logger
     def defaults(self, logger):
@@ -169,5 +171,34 @@ class Logger(unittest.TestCase):
         logger = isc.log.Logger("child")
         self.assertEqual(logger.DBGLVL_COMMAND, 10)
 
+    def test_param_reference(self):
+        """
+        Check whether passing a parameter to a logger causes a reference leak.
+        """
+        class LogParam:
+            def __str__(self):
+                return 'LogParam'
+        logger = isc.log.Logger("child")
+        param = LogParam()
+        orig_msgrefcnt = sys.getrefcount(param)
+        orig_idrefcnt = sys.getrefcount(self.TEST_MSG)
+        logger.info(self.TEST_MSG, param);
+        self.assertEqual(sys.getrefcount(self.TEST_MSG), orig_idrefcnt)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+        # intentionally pass an invalid type for debug level.  It will
+        # result in TypeError.  The passed object still shouldn't leak a
+        # reference.
+        self.assertRaises(TypeError, logger.debug, param, self.TEST_MSG, param)
+        self.assertEqual(sys.getrefcount(param), orig_msgrefcnt)
+
+    def test_bad_parameter(self):
+        # a log parameter cannot be converted to a string object.
+        class LogParam:
+            def __str__(self):
+                raise ValueError("LogParam can't be converted to string")
+        logger = isc.log.Logger("child")
+        self.assertRaises(ValueError, logger.info, self.TEST_MSG, LogParam())
+
 if __name__ == '__main__':
     unittest.main()
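
To show the binding behaviour the new tests rely on from a caller's point of view,
a minimal usage sketch follows.  The logger name, message ID and parameter class are
placeholders; only calls already used in this test file (init(), create_message(),
Logger(), info(), debug(), the DBGLVL constants) appear:

    # Sketch only, not part of the patch.
    import isc.log

    isc.log.init("example")
    EXAMPLE_MSG = isc.log.create_message('EXAMPLE_MSG', 'value is %1')
    logger = isc.log.Logger("example-module")

    class Param:
        def __str__(self):
            return 'converted-by-__str__'

    # Any stringifiable object can be passed; conversion happens in
    # objectToStr() without leaking a reference to the argument.
    logger.info(EXAMPLE_MSG, Param())
    # debug() takes the debug level first, then the message ID.
    logger.debug(logger.DBGLVL_COMMAND, EXAMPLE_MSG, 42)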
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index af79b7c..64a4b3e 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -284,14 +284,12 @@ class NotifyOut:
                          format_zone_str(zone_name, zone_class))
             return []
 
-        result, ns_rrset = finder.find(zone_name, RRType.NS(), None,
-                                       finder.FIND_DEFAULT)
+        result, ns_rrset = finder.find(zone_name, RRType.NS())
         if result is not finder.SUCCESS or ns_rrset is None:
             logger.warn(NOTIFY_OUT_ZONE_NO_NS,
                         format_zone_str(zone_name, zone_class))
             return []
-        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
-                                        finder.FIND_DEFAULT)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
         if result is not finder.SUCCESS or soa_rrset is None or \
                 soa_rrset.get_rdata_count() != 1:
             logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
@@ -304,13 +302,11 @@ class NotifyOut:
             ns_name = Name(ns_rdata.to_text())
             if soa_mname == ns_name:
                 continue
-            result, rrset = finder.find(ns_name, RRType.A(), None,
-                                        finder.FIND_DEFAULT)
+            result, rrset = finder.find(ns_name, RRType.A())
             if result is finder.SUCCESS and rrset is not None:
                 addrs.extend([a.to_text() for a in rrset.get_rdata()])
 
-            result, rrset = finder.find(ns_name, RRType.AAAA(), None,
-                                        finder.FIND_DEFAULT)
+            result, rrset = finder.find(ns_name, RRType.AAAA())
             if result is finder.SUCCESS and rrset is not None:
                 addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
 
@@ -504,8 +500,7 @@ class NotifyOut:
                                            zone_name.to_text() + '/' +
                                            zone_class.to_text() + ' not found')
 
-        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
-                                        finder.FIND_DEFAULT)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
         if result is not finder.SUCCESS or soa_rrset is None or \
                 soa_rrset.get_rdata_count() != 1:
             raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
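
The simplification above relies on the updated ZoneFinder.find() providing defaults
for the trailing arguments that used to be passed explicitly as None and
finder.FIND_DEFAULT.  A condensed sketch of the resulting lookup pattern (the
function name and NS name are placeholders):

    # Sketch only, not part of the patch.
    from isc.dns import Name, RRType

    def ns_glue_addresses(finder, ns_name):
        '''Collect A/AAAA addresses for one NS name via the shortened find().'''
        addrs = []
        for rrtype in (RRType.A(), RRType.AAAA()):
            result, rrset = finder.find(Name(ns_name), rrtype)
            if result is finder.SUCCESS and rrset is not None:
                addrs.extend([rdata.to_text() for rdata in rrset.get_rdata()])
        return addrs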
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 6b62b90..3af5991 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -29,5 +29,6 @@ endif
 	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/testutils/Makefile.am b/src/lib/python/isc/testutils/Makefile.am
index 0b08257..5479d83 100644
--- a/src/lib/python/isc/testutils/Makefile.am
+++ b/src/lib/python/isc/testutils/Makefile.am
@@ -1,4 +1,4 @@
-EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py
+EXTRA_DIST = __init__.py parse_args.py tsigctx_mock.py rrset_utils.py
 
 CLEANDIRS = __pycache__
 
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
new file mode 100644
index 0000000..7eac772
--- /dev/null
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -0,0 +1,82 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+'''Utility functions handling DNS RRsets commonly used for tests'''
+
+from isc.dns import *
+
+def rrsets_equal(a, b):
+    '''Compare two RRsets, return True if equal, otherwise False
+
+    We provide this function as part of the test utils because there is
+    currently no direct RRset comparison.  There's no accessor for signatures
+    either, so this only checks name, class, type, TTL, and rdata.
+    Also, since we often use fake data in RRSIGs, RRSIG RDATA are not checked.
+
+    '''
+    return a.get_name() == b.get_name() and \
+           a.get_class() == b.get_class() and \
+           a.get_type() == b.get_type() and \
+           a.get_ttl() == b.get_ttl() and \
+           (a.get_type() == RRType.RRSIG() or
+            sorted(a.get_rdata()) == sorted(b.get_rdata()))
+
+# The following are shortcut utilities to create an RRset of a specific
+# RR type with one RDATA.  Many of the RR parameters are common in most
+# tests, so we define default values for them for convenience.
+
+def create_a(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.A(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+    return rrset
+
+def create_aaaa(name, address, ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.AAAA(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.AAAA(), RRClass.IN(), address))
+    return rrset
+
+def create_ns(nsname, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+    rrset = RRset(name, RRClass.IN(), RRType.NS(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
+    return rrset
+
+def create_cname(target='target.example.com', name=Name('example.com'),
+                 ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+    return rrset
+
+def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
+    '''Create an RR of a general type with an arbitrary length of RDATA
+
+    If the RR type isn't specified, TYPE65300 will be used, which is
+    arbitrarily chosen from the IANA "Reserved for Private Use" range.
+    The RDATA will be filled with the specified number of all-zero bytes.
+
+    '''
+    rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
+    rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+                          str(rdlen) + ' ' + '00' * rdlen))
+    return rrset
+
+def create_soa(serial, name=Name('example.com'), ttl=3600):
+    '''For convenience we use a default name often used as a zone name'''
+
+    rrset = RRset(name, RRClass.IN(), RRType.SOA(), RRTTL(ttl))
+    rdata_str = 'master.example.com. admin.example.com. ' + \
+        str(serial) + ' 3600 1800 2419200 7200'
+    rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), rdata_str))
+    return rrset
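
As a quick illustration of how a test might use the helpers defined above (sketch
only; the serial and address values are arbitrary):

    # Sketch only, not part of the patch.
    from isc.dns import Name
    from isc.testutils.rrset_utils import create_soa, create_a, rrsets_equal

    soa1 = create_soa(1234)                 # example.com SOA, serial 1234
    soa2 = create_soa(1234)
    assert rrsets_equal(soa1, soa2)         # same name/class/type/TTL/RDATA

    a_rr = create_a(Name('ns1.example.com'), '192.0.2.1')
    assert not rrsets_equal(soa1, a_rr)     # different type (and RDATA)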
diff --git a/src/lib/python/isc/xfrin/diff.py b/src/lib/python/isc/xfrin/diff.py
index a2d9a7d..38b7f39 100644
--- a/src/lib/python/isc/xfrin/diff.py
+++ b/src/lib/python/isc/xfrin/diff.py
@@ -59,7 +59,7 @@ class Diff:
     the changes to underlying data source right away, but keeps them for
     a while.
     """
-    def __init__(self, ds_client, zone, replace=False):
+    def __init__(self, ds_client, zone, replace=False, journaling=False):
         """
         Initializes the diff to a ready state. It checks the zone exists
         in the datasource and if not, NoSuchZone is raised. This also creates
@@ -67,13 +67,25 @@ class Diff:
 
         The ds_client is the datasource client containing the zone. Zone is
         isc.dns.Name object representing the name of the zone (its apex).
-        If replace is true, the content of the whole zone is wiped out before
+        If replace is True, the content of the whole zone is wiped out before
         applying the diff.
 
+        If journaling is True, the history of subsequent updates will be
+        recorded as well as the updates themselves, as long as the underlying
+        data source supports journaling.  If the data source allows
+        incoming updates but does not support journaling, the Diff object
+        will still apply the diffs, but with journaling disabled.
+
         You can also expect isc.datasrc.Error or isc.datasrc.NotImplemented
         exceptions.
         """
-        self.__updater = ds_client.get_updater(zone, replace)
+        try:
+            self.__updater = ds_client.get_updater(zone, replace, journaling)
+        except isc.datasrc.NotImplemented as ex:
+            if not journaling:
+                raise ex
+            self.__updater = ds_client.get_updater(zone, replace, False)
+            logger.info(LIBXFRIN_NO_JOURNAL, zone, ds_client)
         if self.__updater is None:
             # The no such zone case
             raise NoSuchZone("Zone " + str(zone) +
diff --git a/src/lib/python/isc/xfrin/libxfrin_messages.mes b/src/lib/python/isc/xfrin/libxfrin_messages.mes
index be943c8..203e31f 100644
--- a/src/lib/python/isc/xfrin/libxfrin_messages.mes
+++ b/src/lib/python/isc/xfrin/libxfrin_messages.mes
@@ -19,3 +19,13 @@
 The xfrin module received an update containing multiple rdata changes for the
 same RRset. But the TTLs of these don't match each other. As we combine them
 together, the later one gets overwritten by the earlier one in the sequence.
+
+% LIBXFRIN_NO_JOURNAL disabled journaling for updates to %1 on %2
+An attempt was made to create a Diff object with journaling enabled, but
+the underlying data source didn't support journaling (while still allowing
+updates) and so the created object has it disabled.  At a higher level this
+means that the updates will be applied to the zone but subsequent IXFR requests
+will result in a full zone transfer (i.e., an AXFR-style IXFR).  Unless the
+overhead of the full transfer is an issue this message can be ignored;
+otherwise you may want to check why the journaling wasn't allowed on the
+data source and either fix the issue or use a different type of data source.
diff --git a/src/lib/python/isc/xfrin/tests/diff_tests.py b/src/lib/python/isc/xfrin/tests/diff_tests.py
index 9fab890..9944404 100644
--- a/src/lib/python/isc/xfrin/tests/diff_tests.py
+++ b/src/lib/python/isc/xfrin/tests/diff_tests.py
@@ -15,6 +15,7 @@
 
 import isc.log
 import unittest
+import isc.datasrc
 from isc.dns import Name, RRset, RRClass, RRType, RRTTL, Rdata
 from isc.xfrin.diff import Diff, NoSuchZone
 
@@ -127,7 +128,7 @@ class DiffTest(unittest.TestCase):
         """
         return self.__rrclass
 
-    def get_updater(self, zone_name, replace):
+    def get_updater(self, zone_name, replace, journaling=False):
         """
         This one pretends this is the data source client and serves
         getting an updater.
@@ -138,11 +139,20 @@ class DiffTest(unittest.TestCase):
         # The diff should not delete the old data.
         self.assertEqual(self.__should_replace, replace)
         self.__updater_requested = True
-        # Pretend this zone doesn't exist
         if zone_name == Name('none.example.org.'):
+            # Pretend this zone doesn't exist
             return None
+
+        # If journaling is enabled, record the fact; for a special zone
+        # pretend that we don't support journaling.
+        if journaling:
+            if zone_name == Name('nodiff.example.org'):
+                raise isc.datasrc.NotImplemented('journaling not supported')
+            self.__journaling_enabled = True
         else:
-            return self
+            self.__journaling_enabled = False
+
+        return self
 
     def test_create(self):
         """
@@ -152,6 +162,8 @@ class DiffTest(unittest.TestCase):
         diff = Diff(self, Name('example.org.'))
         self.assertTrue(self.__updater_requested)
         self.assertEqual([], diff.get_buffer())
+        # By default journaling is disabled
+        self.assertFalse(self.__journaling_enabled)
 
     def test_create_nonexist(self):
         """
@@ -161,6 +173,14 @@ class DiffTest(unittest.TestCase):
         self.assertRaises(NoSuchZone, Diff, self, Name('none.example.org.'))
         self.assertTrue(self.__updater_requested)
 
+    def test_create_withjournal(self):
+        Diff(self, Name('example.org'), False, True)
+        self.assertTrue(self.__journaling_enabled)
+
+    def test_create_nojournal(self):
+        Diff(self, Name('nodiff.example.org'), False, True)
+        self.assertFalse(self.__journaling_enabled)
+
     def __data_common(self, diff, method, operation):
         """
         Common part of test for test_add and test_delete.
diff --git a/src/lib/resolve/recursive_query.cc b/src/lib/resolve/recursive_query.cc
index 0d3fb4c..1855f7a 100644
--- a/src/lib/resolve/recursive_query.cc
+++ b/src/lib/resolve/recursive_query.cc
@@ -28,9 +28,9 @@
 #include <dns/opcode.h>
 #include <dns/exceptions.h>
 #include <dns/rdataclass.h>
-
 #include <resolve/resolve.h>
 #include <resolve/resolve_log.h>
+#include <resolve/resolve_messages.h>
 #include <cache/resolver_cache.h>
 #include <nsas/address_request_callback.h>
 #include <nsas/nameserver_address.h>
@@ -39,6 +39,7 @@
 #include <asiodns/dns_service.h>
 #include <asiodns/io_fetch.h>
 #include <asiolink/io_service.h>
+#include <resolve/response_classifier.h>
 #include <resolve/recursive_query.h>
 
 using namespace isc::dns;
@@ -430,7 +431,7 @@ private:
                       .arg(questionText(question_));
             isc::resolve::copyResponseMessage(incoming, answer_message_);
             cache_.update(*answer_message_);
-            return true;
+            return (true);
             break;
 
         case isc::resolve::ResponseClassifier::CNAME:
@@ -444,7 +445,7 @@ private:
                 LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_LONG_CHAIN)
                           .arg(questionText(question_));
                 makeSERVFAIL();
-                return true;
+                return (true);
             }
 
             LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_CNAME)
@@ -460,7 +461,7 @@ private:
             LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_FOLLOW_CNAME)
                       .arg(questionText(question_));
             doLookup();
-            return false;
+            return (false);
             break;
 
         case isc::resolve::ResponseClassifier::NXDOMAIN:
@@ -471,7 +472,7 @@ private:
             isc::resolve::copyResponseMessage(incoming, answer_message_);
             // no negcache yet
             //cache_.update(*answer_message_);
-            return true;
+            return (true);
             break;
 
         case isc::resolve::ResponseClassifier::REFERRAL:
@@ -520,7 +521,7 @@ private:
                 nsas_callback_out_ = true;
                 nsas_.lookup(cur_zone_, question_.getClass(),
                              nsas_callback_, ANY_OK, glue_hints);
-                return false;
+                return (false);
             } else {
                 // Referral was received but did not contain an NS RRset.
                 LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_NO_NS_RRSET)
@@ -528,48 +529,122 @@ private:
 
                 // TODO this will result in answering with the delegation. oh well
                 isc::resolve::copyResponseMessage(incoming, answer_message_);
-                return true;
+                return (true);
             }
             break;
+
         case isc::resolve::ResponseClassifier::TRUNCATED:
             // Truncated packet.  If the protocol we used for the last one is
             // UDP, re-query using TCP.  Otherwise regard it as an error.
             if (protocol_ == IOFetch::UDP) {
-                LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_TRUNCATED)
-                          .arg(questionText(question_));
+                LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS,
+                          RESLIB_TRUNCATED).arg(questionText(question_));
                 send(IOFetch::TCP);
-                return false;
+                return (false);
+            }
+
+            // Was a TCP query so we have received a packet over TCP with the
+            // TC bit set: report an error by dropping down to the common
+            // error code.
+
+        default:
+            // Some error in the received packet.  Report it and return SERVFAIL
+            // to the caller.
+            if (logger.isDebugEnabled()) {
+                reportResponseClassifierError(category, incoming.getRcode());
             }
-            // Was a TCP query so we have received a packet over TCP with the TC
-            // bit set: drop through to common error processing.
-            // TODO: Can we use what we have received instead of discarding it?
-
-        case isc::resolve::ResponseClassifier::EMPTY:
-        case isc::resolve::ResponseClassifier::EXTRADATA:
-        case isc::resolve::ResponseClassifier::INVNAMCLASS:
-        case isc::resolve::ResponseClassifier::INVTYPE:
-        case isc::resolve::ResponseClassifier::MISMATQUEST:
-        case isc::resolve::ResponseClassifier::MULTICLASS:
-        case isc::resolve::ResponseClassifier::NOTONEQUEST:
-        case isc::resolve::ResponseClassifier::NOTRESPONSE:
-        case isc::resolve::ResponseClassifier::NOTSINGLE:
-        case isc::resolve::ResponseClassifier::OPCODE:
-        case isc::resolve::ResponseClassifier::RCODE:
-            LOG_DEBUG(isc::resolve::logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERR)
-                      .arg(questionText(question_));
-            // Should we try a different server rather than SERVFAIL?
             makeSERVFAIL();
-            return true;
-            break;
+            return (true);
         }
 
-        // Since we do not have a default in the switch above,
-        // the compiler should have errored on any missing case
-        // statements.
+        // If we get here, there is some serious logic error (or a missing
+        // "return").
         assert(false);
-        return true;
+        return (true);  // To keep the compiler happy
     }
-    
+
+    /// \brief Report classification-detected error
+    ///
+    /// When the response classifier has detected an error in the response from
+    /// an upstream query, this method is called to log a debug message giving
+    /// information about the problem.
+    ///
+    /// \param category Classification code for the packet
+    /// \param rcode RCODE value in the packet
+    void reportResponseClassifierError(ResponseClassifier::Category category,
+                                       const Rcode& rcode)
+    {
+        // We could set up a table of response classifications to message
+        // IDs here and index into that table.  But given that (a) C++ does
+        // not have C's named initializers, (b) the codes for the
+        // response classifier are in another module and (c) not all messages
+        // have the same number of arguments, the setup of the table would be
+        // almost as long as the code here: it would need to include a number
+        // of assertions to ensure that any change to the response
+        // classifier codes was detected, and the checking logic would need to
+        // check that the numeric value of the code lay within the defined
+        // limits of the table.
+
+        if (category == ResponseClassifier::RCODE) {
+
+            // Special case as this message takes two arguments.
+            LOG_DEBUG(logger, RESLIB_DBG_RESULTS, RESLIB_RCODE_ERROR).
+                      arg(questionText(question_)).arg(rcode);
+
+        } else {
+
+            isc::log::MessageID message_id;
+            switch (category) {
+            case ResponseClassifier::TRUNCATED:
+                message_id = RESLIB_TCP_TRUNCATED;
+                break;
+
+            case ResponseClassifier::EMPTY:
+                message_id = RESLIB_EMPTY_RESPONSE;
+                break;
+
+            case ResponseClassifier::EXTRADATA:
+                message_id = RESLIB_EXTRADATA_RESPONSE;
+                break;
+
+            case ResponseClassifier::INVNAMCLASS:
+                message_id = RESLIB_INVALID_NAMECLASS_RESPONSE;
+                break;
+
+            case ResponseClassifier::INVTYPE:
+                message_id = RESLIB_INVALID_TYPE_RESPONSE;
+                break;
+
+            case ResponseClassifier::MISMATQUEST:
+                message_id = RESLIB_INVALID_QNAME_RESPONSE;
+                break;
+
+            case ResponseClassifier::MULTICLASS:
+                message_id = RESLIB_MULTIPLE_CLASS_RESPONSE;
+                break;
+
+            case ResponseClassifier::NOTONEQUEST:
+                message_id = RESLIB_NOT_ONE_QNAME_RESPONSE;
+                break;
+
+            case ResponseClassifier::NOTRESPONSE:
+                message_id = RESLIB_NOT_RESPONSE;
+                break;
+
+            case ResponseClassifier::NOTSINGLE:
+                message_id = RESLIB_NOTSINGLE_RESPONSE;
+                break;
+
+            case ResponseClassifier::OPCODE:
+                message_id = RESLIB_OPCODE_RESPONSE;
+                break;
+
+            default:
+                message_id = RESLIB_ERROR_RESPONSE;
+            }
+            LOG_DEBUG(logger, RESLIB_DBG_RESULTS, message_id).
+                      arg(questionText(question_));
+        }
+    }
+
 public:
     RunningQuery(IOService& io,
         const Question& question,
@@ -734,12 +809,7 @@ public:
                 incoming.fromWire(ibuf);
 
                 buffer_->clear();
-                if (incoming.getRcode() == Rcode::NOERROR()) {
-                    done_ = handleRecursiveAnswer(incoming);
-                } else {
-                    isc::resolve::copyResponseMessage(incoming, answer_message_);
-                    done_ = true;
-                }
+                done_ = handleRecursiveAnswer(incoming);
                 if (done_) {
                     callCallback(true);
                     stop();
diff --git a/src/lib/resolve/resolve_messages.mes b/src/lib/resolve/resolve_messages.mes
index f702d9b..b59fd8c 100644
--- a/src/lib/resolve/resolve_messages.mes
+++ b/src/lib/resolve/resolve_messages.mes
@@ -15,22 +15,61 @@
 $NAMESPACE isc::resolve
 
 % RESLIB_ANSWER answer received in response to query for <%1>
-A debug message recording that an answer has been received to an upstream
-query for the specified question.  Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message reporting that an answer has been received to an upstream
+query for the specified question.  Previous debug messages will have
+indicated the server to which the question was sent.
 
 % RESLIB_CNAME CNAME received in response to query for <%1>
-A debug message recording that CNAME response has been received to an upstream
-query for the specified question.  Previous debug messages will have indicated
-the server to which the question was sent.
+A debug message recording that CNAME response has been received to an
+upstream query for the specified question.  Previous debug messages will
+have indicated the server to which the question was sent.
 
 % RESLIB_DEEPEST did not find <%1> in cache, deepest delegation found is %2
-A debug message, a cache lookup did not find the specified <name, class,
-type> tuple in the cache; instead, the deepest delegation found is indicated.
+A debug message, a cache lookup did not find the specified <name,
+class, type> tuple in the cache; instead, the deepest delegation found
+is indicated.
+
+% RESLIB_EMPTY_RESPONSE empty response received to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver did not contain anything in the answer or authority sections,
+although in all other respects it was a valid response.  A SERVFAIL will
+be returned to the system making the original query.
+
+% RESLIB_ERROR_RESPONSE unspecified error received in response to query for <%1>
+A debug message, the response to the specified query to an upstream
+nameserver indicated that the response was classified as an erroneous
+response, but that the nature of the error cannot be identified.
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_EXTRADATA_RESPONSE extra data in response to query for <%1>
+A debug message indicating that the response to the specified query
+from an upstream nameserver contained too much data.  This can happen if
+an ANY query was sent and the answer section in the response contained
+multiple RRs with different names.  A SERVFAIL will be returned to the
+system making the original query.
 
 % RESLIB_FOLLOW_CNAME following CNAME chain to <%1>
-A debug message, a CNAME response was received and another query is being issued
-for the <name, class, type> tuple.
+A debug message, a CNAME response was received and another query is
+being issued for the <name, class, type> tuple.
+
+% RESLIB_INVALID_NAMECLASS_RESPONSE invalid name or class in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained either
+an answer not matching the query name or an answer having a different
+class to that queried for.  A SERVFAIL will be returned to the system
+making the original query.
+
+% RESLIB_INVALID_QNAME_RESPONSE invalid query name in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained a name
+in the question section that did not match that of the query. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_INVALID_TYPE_RESPONSE invalid type in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) contained an
+invalid type field. A SERVFAIL will be returned to the system making
+the original query.
 
 % RESLIB_LONG_CHAIN CNAME received in response to query for <%1>: CNAME chain length exceeded
 A debug message recording that a CNAME response has been received to an upstream
@@ -39,16 +78,47 @@ the server to which the question was sent).  However, receipt of this CNAME
 has meant that the resolver has exceeded the CNAME chain limit (a CNAME chain
 is where one CNAME points to another) and so an error is being returned.
 
+% RESLIB_MULTIPLE_CLASS_RESPONSE response to query for <%1> contained multiple RRsets with different classes
+A debug message reporting that the response to an upstream query for
+the specified name contained multiple RRsets in the answer and not all
+were of the same class.  This is a violation of the standard and so a
+SERVFAIL will be returned.
+
 % RESLIB_NO_NS_RRSET no NS RRSet in referral response received to query for <%1>
 A debug message, this indicates that a response was received for the specified
 query and was categorized as a referral.  However, the received message did
 not contain any NS RRsets.  This may indicate a programming error in the
 response classification code.
 
+% RESLIB_NOT_ONE_QNAME_RESPONSE not one question in response to query for <%1>
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not contain
+one name in the question section as required by the standard. A SERVFAIL
+will be returned to the system making the original query.
+
+% RESLIB_NOT_RESPONSE response to query for <%1> was not a response
+A debug message, the response to the specified query from an upstream
+nameserver (as identified by the ID of the response) did not have the QR
+bit set (thus indicating that the packet was a query, not a response).
+A SERVFAIL will be returned to the system making the original query.
+
+% RESLIB_NOTSINGLE_RESPONSE CNAME response to query for <%1> was not a single RR
+A debug message, the response to the specified query from an upstream
+nameserver was a CNAME that had multiple RRs in the RRset.  This is
+an invalid response according to the standards so a SERVFAIL will be
+returned to the system making the original query.
+
 % RESLIB_NSAS_LOOKUP looking up nameserver for zone %1 in the NSAS
 A debug message, the RunningQuery object is querying the NSAS for the
 nameservers for the specified zone.
 
+% RESLIB_OPCODE_RESPONSE response to query for <%1> did not have query opcode
+A debug message, the response to the specified query from an upstream
+nameserver was a response that did not have the opcode set to that of
+a query.  According to the standards, this is an invalid response to
+the query that was made, so a SERVFAIL will be returned to the system
+making the original query.
+
 % RESLIB_NXDOM_NXRR NXDOMAIN/NXRRSET received in response to query for <%1>
 A debug message recording that either a NXDOMAIN or an NXRRSET response has
 been received to an upstream query for the specified question.  Previous debug
@@ -63,7 +133,7 @@ A debug message indicating that a protocol error was received and that
 the resolver is repeating the query to the same nameserver.  After this
 repeated query, there will be the indicated number of retries left.
 
-% RESLIB_RCODE_ERR RCODE indicates error in response to query for <%1>
+% RESLIB_RCODE_ERROR response to query for <%1> returns RCODE of %2
 A debug message, the response to the specified query indicated an error
 that is not covered by a specific code path.  A SERVFAIL will be returned.
 
@@ -122,6 +192,11 @@ A debug message indicating that a RunningQuery's success callback has been
 called because a nameserver has been found, and that a query is being sent
 to the specified nameserver.
 
+% RESLIB_TCP_TRUNCATED TCP response to query for %1 was truncated
+This is a debug message logged when the response to the specified query,
+received from an upstream nameserver over TCP, had the TC (truncation) bit
+set.  This is treated as an error by the code.
+
 % RESLIB_TEST_SERVER setting test server to %1(%2)
 This is a warning message only generated in unit tests.  It indicates
 that all upstream queries from the resolver are being routed to the
diff --git a/src/lib/resolve/response_classifier.h b/src/lib/resolve/response_classifier.h
index 3821560..a027bd0 100644
--- a/src/lib/resolve/response_classifier.h
+++ b/src/lib/resolve/response_classifier.h
@@ -151,7 +151,7 @@ private:
         size_t size);
 };
 
-#endif // __RESPONSE_CLASSIFIER_H
-
 } // namespace resolve
 } // namespace isc
+
+#endif // __RESPONSE_CLASSIFIER_H
diff --git a/src/lib/xfr/Makefile.am b/src/lib/xfr/Makefile.am
index d714990..3d7f60f 100644
--- a/src/lib/xfr/Makefile.am
+++ b/src/lib/xfr/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = . tests
+
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
 AM_CPPFLAGS += $(BOOST_INCLUDES)
diff --git a/src/lib/xfr/tests/Makefile.am b/src/lib/xfr/tests/Makefile.am
new file mode 100644
index 0000000..4abb456
--- /dev/null
+++ b/src/lib/xfr/tests/Makefile.am
@@ -0,0 +1,25 @@
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc client_test.cc
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/xfr/tests/client_test.cc b/src/lib/xfr/tests/client_test.cc
new file mode 100644
index 0000000..6c9f4ad
--- /dev/null
+++ b/src/lib/xfr/tests/client_test.cc
@@ -0,0 +1,37 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <sys/un.h>
+#include <string>
+
+#include <xfr/xfrout_client.h>
+
+using namespace std;
+using namespace isc::xfr;
+
+namespace {
+
+TEST(ClientTest, connectFile) {
+    // File path is too long
+    struct sockaddr_un s;     // can't be const; some compiler complains
+    EXPECT_THROW(XfroutClient(string(sizeof(s.sun_path), 'x')).connect(),
+                 XfroutError);
+
+    // File doesn't exist (we assume the file "no_such_file" doesn't exist)
+    EXPECT_THROW(XfroutClient("no_such_file").connect(), XfroutError);
+}
+
+}
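
A note on the first test case in client_test.cc above: a UNIX-domain socket
path must fit inside sockaddr_un::sun_path (including the terminating NUL
byte), so a string of exactly sizeof(s.sun_path) characters can never name a
valid endpoint, and XfroutClient::connect() is expected to fail for it.  A
minimal standalone sketch of that length check, where the fitsInSunPath()
helper and the example path are hypothetical rather than code from this
branch, might look like:

    #include <sys/un.h>

    #include <iostream>
    #include <string>

    // Sketch only: a path fits in sockaddr_un only if it is shorter than
    // sizeof(sun_path), which leaves room for the terminating NUL byte.
    bool fitsInSunPath(const std::string& path) {
        struct sockaddr_un s;
        return (path.size() < sizeof(s.sun_path));
    }

    int main() {
        struct sockaddr_un s;
        const std::string too_long(sizeof(s.sun_path), 'x');  // same trick as the test
        std::cout << std::boolalpha
                  << fitsInSunPath("/tmp/example.sock") << "\n"  // true: short enough
                  << fitsInSunPath(too_long) << std::endl;       // false: exactly sun_path size
        return (0);
    }
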
diff --git a/src/lib/xfr/tests/run_unittests.cc b/src/lib/xfr/tests/run_unittests.cc
new file mode 100644
index 0000000..8dc59a2
--- /dev/null
+++ b/src/lib/xfr/tests/run_unittests.cc
@@ -0,0 +1,24 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+    ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
+    return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/xfr/xfrout_client.cc b/src/lib/xfr/xfrout_client.cc
index 6ab905b..227ffc4 100644
--- a/src/lib/xfr/xfrout_client.cc
+++ b/src/lib/xfr/xfrout_client.cc
@@ -52,10 +52,11 @@ XfroutClient::~XfroutClient() {
 
 void
 XfroutClient::connect() {
-    asio::error_code err;
-    impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_), err);
-    if (err) {
-        isc_throw(XfroutError, "socket connect failed: " << err.message());
+    try {
+        impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_));
+    } catch (const asio::system_error& err) {
+        isc_throw(XfroutError, "socket connect failed for " <<
+                  impl_->file_path_ << ": " << err.what());
     }
 }
 
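
The xfrout_client.cc hunk above replaces the error_code overload of asio's
connect() with the overload that throws asio::system_error, so the exception
message can carry both the socket path and the system error text.  A minimal
standalone sketch of that pattern, assuming a hypothetical socket path and the
io_service-style asio API in use at the time (newer asio releases name it
io_context), could be:

    #include <asio.hpp>

    #include <iostream>
    #include <string>

    // Sketch only: connect a UNIX-domain stream socket with the throwing
    // overload of connect(); a failure surfaces as asio::system_error,
    // which carries the underlying error code and message.
    int main() {
        const std::string path = "/tmp/example.sock";   // hypothetical path
        asio::io_service io_service;
        asio::local::stream_protocol::socket socket(io_service);
        try {
            socket.connect(asio::local::stream_protocol::endpoint(path));
            std::cout << "connected to " << path << std::endl;
        } catch (const asio::system_error& err) {
            std::cerr << "socket connect failed for " << path << ": "
                      << err.what() << std::endl;
            return (1);
        }
        return (0);
    }
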
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index fbbb3a2..5248316 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -137,5 +137,8 @@ def send_command(step, command, cmdctl_port):
                                subprocess.PIPE, None)
     bindctl.stdin.write(command + "\n")
     bindctl.stdin.write("quit\n")
-    result = bindctl.wait()
-    assert result == 0, "bindctl exit code: " + str(result)
+    (stdout, stderr) = bindctl.communicate()
+    result = bindctl.returncode
+    assert result == 0, "bindctl exit code: " + str(result) +\
+                        "\nstdout:\n" + str(stdout) +\
+                        "\nstderr:\n" + str(stderr)
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
index 23b2eda..70c3571 100644
--- a/tests/lettuce/features/xfrin_bind10.feature
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -5,6 +5,7 @@ Feature: Xfrin
     Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master
     And I have bind10 running with configuration xfrin/retransfer_slave.conf
     A query for www.example.org should have rcode REFUSED
+    Wait for bind10 stderr message CMDCTL_STARTED
     When I send bind10 the command Xfrin retransfer example.org IN 127.0.0.1 47807
     Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
     A query for www.example.org should have rcode NOERROR
diff --git a/tests/tools/badpacket/badpacket.cc b/tests/tools/badpacket/badpacket.cc
index 86bbc47..be393d5 100644
--- a/tests/tools/badpacket/badpacket.cc
+++ b/tests/tools/badpacket/badpacket.cc
@@ -18,6 +18,7 @@
 #include <config.h>
 
 #include <exceptions/exceptions.h>
+#include <log/logger_support.h>
 #include "command_options.h"
 #include "scan.h"
 
@@ -44,6 +45,7 @@ using namespace isc::badpacket;
 
 /// \brief Main Program
 int main(int argc, char* argv[]) {
+    isc::log::initLogger("badpacket");
 
     try {
         // Parse command



