BIND 10 trac1383, updated. 9b2b249d23576c999a65d8c338e008cabe45f0c9 Merge branch 'master' into trac1383

BIND 10 source code commits bind10-changes at lists.isc.org
Fri Dec 9 10:45:01 UTC 2011


The branch, trac1383 has been updated
       via  9b2b249d23576c999a65d8c338e008cabe45f0c9 (commit)
       via  45274924f3f25b70547809eeda5dbcbe230029b5 (commit)
       via  a6a35e9970e4937924c1e33c01f6bd7eaf1ed994 (commit)
       via  c8dc421c5cad0f3296174b44f8deccfb69dec43f (commit)
       via  91fb141bfb3aadfdf96f13e157a26636f6e9f9e3 (commit)
       via  37a11387baa321daec8311fc66d5d83e567886bd (commit)
       via  cee6a7af5f4f116ea89ecfde9a235dfe727bf207 (commit)
       via  a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2 (commit)
       via  7f5229256540479ef63d707a14d194c80099fab3 (commit)
       via  e82cf73f21e06b1a301ff120fb5f73203d343111 (commit)
       via  777d6f3037738200a8a8426a0b957b18011d460a (commit)
       via  b705708aafbe566facc578a02b2f1cce44dff86f (commit)
       via  a56e72ce1bbc9d016a7ebd83eaba0aadbf2b41aa (commit)
       via  318dceaa39aa30ee9d394e1e096d4891f3bee490 (commit)
       via  25ac24557f8789f516ac7ffa1db831701ebf3c37 (commit)
       via  2a6b5e55caf422997b893e319db83855fe1709b1 (commit)
       via  83ecce54ed1ef5215f722e8339ae4a43f50ada5c (commit)
       via  8dcf5eebb1b81e6cdc963985daa6c80497ac8c16 (commit)
       via  662233a1483040da5dbc29dd9c9baf6bf0832223 (commit)
       via  0e3736c7c3e882ba3f0616b9d0877792edd73317 (commit)
       via  4b57a79735953705a82d8595a8ac541f7deb7a74 (commit)
       via  b41b7dc34a8a14339a1ff9daf1d705997d9abc43 (commit)
       via  6bda5426c6f8b4e9faefc2075575e1c98bc3907c (commit)
       via  6ff03bb9d631023175df99248e8cc0cda586c30a (commit)
       via  0c8c0f808a9ddc3a27a87f55964a965fb30f18ef (commit)
       via  df44752c7d69b865fa346215c1ec428153d5a3ca (commit)
       via  60fd293717cc45323cfb10cf06d5bd264fa083cc (commit)
       via  4220ef5ac9c8fdd4b506b3579f0e5eec98e3f3d8 (commit)
       via  56be59fbcdc0ba54ccea0d09d49ef28dace3d65d (commit)
       via  1341209064bc7afd8e720e3b12060239c368bcdd (commit)
       via  86a4ce45115dab4d3978c36dd2dbe07edcac02ac (commit)
       via  14a64484a3159a142f1b83a9830ac389a52f6a35 (commit)
       via  146203239c50d2a00069986944d4ec168f17b31f (commit)
       via  b7b90e50531bcbd2caaffe6b51aea8917a56f40d (commit)
       via  fd5713eae0276a3d623953c88fc6281aab0b71d3 (commit)
       via  4c0fd2ba248e5f925566e02724b34c85179a8c51 (commit)
       via  d408808ea8e4fe24a21d13992a88ad62c22634b8 (commit)
       via  cce182ed937f7294b2a11a3af5b2e03a380b7cd5 (commit)
       via  88f94cf8e025558b14091af5050e2ce424237ea0 (commit)
       via  4db174c8f096e2b54b3a5d384a6cffc25b9d9024 (commit)
       via  49f1f1bc36042d4b4c27a002e963f400deb694d7 (commit)
       via  3bb1cc7d961930edc38d9f8b34d0cccd3d69dd96 (commit)
       via  6583a47dde1b851aee99de3c38c6331a22ede260 (commit)
       via  99033305fa90310135d37118a0d47df3f2223770 (commit)
       via  b7bbe25fdf0d0c168c24c904c82c7e04fc269bba (commit)
       via  862b0e38047101afef0f6d81ba3f0d74c9a51ea5 (commit)
       via  cf5e4481b1f1fb00e9897e4cd0527a9a707c4a63 (commit)
       via  0ae049de728692224b087e95a645f45f4a69cb68 (commit)
       via  1d5d7d8ea8d4dae23783b71e4a93165a36124663 (commit)
       via  696f576c4743130bc8a77844c700582d5faaf391 (commit)
       via  9f792ee32ba42a44291277d0577196e03a929738 (commit)
       via  7a1997b2d44d05c179adecad0bf68c1f7c19935f (commit)
       via  e580e44a38c8dcf02e6ff40ba5977f18c8200572 (commit)
       via  660cf410c0fb41587b977df992879f5dff934c19 (commit)
       via  b09fdcc6b45d4580b138cc9f59bfc051bd6ad360 (commit)
       via  4d97ef5cdb4833a7a36b6679c16338505b07d4e3 (commit)
       via  424f32864efcd2c647c6e5303125b6a8afb421ea (commit)
       via  11885ff9e91c98ff0c4e93d81fc2b3d47a02090d (commit)
       via  0c53ade72d9589d3d521937652af20f9d7a20f8e (commit)
       via  cc20ff993da1ddb1c6e8a98370438b45a2be9e0a (commit)
       via  2142e8e6f760c577b58747c515c38fcc10168e04 (commit)
       via  402c03afffde1e664c9dbd7b3c40e78a23b261c5 (commit)
       via  b3c9fffb335e7a42ff07a99016df46ccaf3dae97 (commit)
       via  50fdb098fc80146a714794cb9156ac03de909103 (commit)
       via  a4f7763150aa2ced077f67bddbaa39255bd2fbf9 (commit)
       via  d3792aa7fc6ca920c7f9a3f36318ea1160974850 (commit)
       via  9351ba5d5461f8f93a169a99a5e378416a970bd5 (commit)
       via  8376919647ef84268085bfdd56271714416a6405 (commit)
       via  4e636c3e9a365304fc463929cdddcd555dcb3ad2 (commit)
       via  73721a97b1f3741bf58bf774601fef99a4ecb54f (commit)
       via  6764e7da0d5c6967e5607fc6e31c112895ed1827 (commit)
       via  40cfd32c280020af33a28c1501380a17ce604175 (commit)
       via  f9cbe6fb6e0a3d3dc03218429f530e6c01920169 (commit)
       via  18128a99fd70d66eb09312dd8dfa0f0521033f97 (commit)
       via  e7019de8a8ec9ff562557c9fc2a0bd28a4c64829 (commit)
       via  173de1cea65293e5f7cfb904454ee4fa96c51e1d (commit)
       via  e533dc83ccb7bf541e53f753c28a52248d7b195b (commit)
       via  2ae72d76c74f61a67590722c73ebbf631388acbd (commit)
       via  1ecbd2b16b44b6439030fd245f951fe5a538ecc5 (commit)
       via  83ce13c2d85068a1bec015361e4ef8c35590a5d0 (commit)
       via  5f34cb40976859771ab996113f78937310e7bda5 (commit)
       via  8ee52fc304388aef162a819e2e59066bb70f0618 (commit)
       via  3fdce88046bdad392bd89ea656ec4ac3c858ca2f (commit)
       via  6690b8985d656aba3f25082cb62c9c02e5ad5a0b (commit)
       via  7715c727d25d6430cbdbd82e40bdb7b3fa2ea843 (commit)
       via  6d921ed561b6ef9d26273ca321dfa24622a982b5 (commit)
       via  144549c04afdc36a98c7530eaebafb2b3d38d545 (commit)
       via  a26b979adb54baabdf939ed1a7852b2ee9b8b93c (commit)
       via  eb703a7e5b3749ca95a43c7582c9cccde564f123 (commit)
       via  4464612807e6c4bd120298ca105b0503af0d3110 (commit)
       via  cbe600decbef4db82cb3b070e03b5702540af4aa (commit)
       via  0b5da8bd0800bfa3744e23c367cee2c38de7a497 (commit)
       via  7ac21664665acee54a2a57331a2afc2a0c0a3530 (commit)
       via  96a32f0ab063cbcd98fae0d5a87bc286bb8a7498 (commit)
       via  7019db2a44f39897486eea618f4447c37dbabcf8 (commit)
       via  024808d2a40b695f6c7191398c5a3d2c39c26736 (commit)
       via  9df50bec4e691dc8cb724547659fb71caad656ab (commit)
       via  3a206ab523d4612676362274ae8c30def53ac15e (commit)
       via  15dffb02f179974c6726f16aff586c49eec8c7ca (commit)
       via  ad90525811869e2ff6fb5e24d38bf19e5743767e (commit)
       via  936d5cad35355e1785550f7150f90e688166f448 (commit)
       via  0737908f9e7cb615f80354131dca4df1a8c0bff6 (commit)
       via  d6d7a352b0b0df685f285cd413568b0e475339da (commit)
       via  82fdeb65eba233a63b4425c7c5528a6257b91703 (commit)
       via  5832af30821d4d4d077946b063b8f53056fa7e60 (commit)
       via  1d24818f927edb1840f673f1ba67d7a45d9ef1c2 (commit)
       via  f06cabd31d8e43781e4e32bdfdf24c78931d3ca8 (commit)
       via  0e945f09e0e127e5097c32e5c84d96e34a18b3b6 (commit)
       via  c9be5877151f0564725d1cd9a20fe393fe7b422f (commit)
       via  560122414abc11fa2a39331734c607cc37a4e76c (commit)
       via  5468c7defd530b29696108cbda6d278b14be351b (commit)
       via  64853ae0cca2070a0536ee6f499084c8a9017fa2 (commit)
       via  1d8a592d1301b7e3a39c88ce1e001122db125307 (commit)
       via  9682bf18607745c83437cd4592d3289e68410772 (commit)
       via  20a6000a9f69476797477ca7af5fd83b8e236909 (commit)
       via  537af1705fc5c1695b4b601571f65ead81dc1289 (commit)
       via  1485c897a9e2c71ed2a33c8972c116a5f7e8e078 (commit)
       via  b7ac17da5405582098e98ed22bf122fe87658923 (commit)
       via  ada65c7115b9840f509f2b0c640368735fe69520 (commit)
       via  b8d14d2e45ee719e4e33adbecddafb4ae3aa4df1 (commit)
       via  966fdcc69001cd2562ca96b392b9a45e7c336224 (commit)
       via  567260cde6e21499ad4bf47789d538a929df5552 (commit)
       via  c789138250b33b6b08262425a08a2a0469d90433 (commit)
       via  b092df6f17e5d8f8f07e726fc4006e346417d49f (commit)
       via  d9b851b96c9fb3f56c4fe3a626f5c2b05bbb7a5f (commit)
       via  9300ad5a1030e50ab76ff8a6f87b4d91d2d2b124 (commit)
       via  eb6053d466fcea08fa66205d598d316e550863c8 (commit)
       via  42d4a37a121ea7df3440268fe15995267fb66b12 (commit)
       via  1b186f0a6fc242fa6dff08944ef43b60010d3631 (commit)
       via  34de4dab534c2ccc735f6c815aa0459553aa1153 (commit)
       via  b6568546ccdac044fd30200a54708f9418e7af9d (commit)
       via  936511f6e114f26bf86497466a7f61ef467bf5ad (commit)
       via  7f573f432cfca90d2f9409829f14b3645083b9af (commit)
       via  b586771730eb1d22330e3a4f46c6c596d6ab57da (commit)
       via  137abb738558ae9602f834890f477a924b520001 (commit)
       via  14c51c664a98beb4867728d528190aff335e6f27 (commit)
       via  6a4afc2165e4e6e692e71cb6795201c9df5afee2 (commit)
       via  047ea7f6cfa2677865dcf441726dcc3e082608a9 (commit)
       via  07b884ef0f72044fa5a5fd661ab068794ff68ca6 (commit)
       via  51f3cb54492ef02e4951afb15a9c40ba0cdff4ce (commit)
       via  51c9278d000daee776c5e12456d8c4ea60ff5f21 (commit)
       via  4ca30d27a1149bf5c445f382c4767b5c4e168d95 (commit)
       via  f6def2435fe72e00a782244461e8a186a4a23e63 (commit)
       via  75fc5817606b892c8eeb4964dc2f3d2f044f5491 (commit)
       via  3e1a6afcabbef24f9501d8f4e3ed27d824db4309 (commit)
       via  0dedcdb128646fdbf37be96f91076adda2f37c95 (commit)
       via  fc6a79af0d625ca18a2cdc3df91e86e8c1e02f9c (commit)
       via  5a2d958780a4a671cd8df9080d99ff95dd16772d (commit)
       via  075e3787986676c7491f157931b6f7da1773db0a (commit)
       via  7d2f07481169780071bf564223a20a219b550385 (commit)
       via  d5e189cf1573446503a4fafa3e909db60eb04623 (commit)
       via  0b6937d0e075e1192c41891ae138532f2c733b47 (commit)
       via  5371b694b6cc564c3f1899a935769dd024f38e56 (commit)
       via  837002896937febe208c141912fc4f8c3beaa2ab (commit)
       via  36dc8dd6f15a42f401ffa32829ed7c436e529eb3 (commit)
       via  c5117dc4d2fd89f1a66849713c6a3cd51735699f (commit)
       via  5d7004d0ac4fe553a61fd2eb99a8af3eb7324956 (commit)
       via  fc0fe98a085ece85e143188c5647740f95d347bc (commit)
       via  456933355bf3bc2db5a6c52ba4dc6d8e826ce6e1 (commit)
       via  657349ae281dcdf737b187d0be2cd7d0e4fa92a7 (commit)
       via  a7505fac495a9746d8bf3e9a2f4a3aa8541b85c2 (commit)
       via  2cd7de7f848f743ee31c356fd7edc9231ba6ca3a (commit)
       via  1468dd9e7bc1e0a045cdab88d1db815cc7e2bd52 (commit)
       via  3582ccf1eb2093d34e944bcda5ea2069158349dc (commit)
       via  d64cd3aa3d095ad5f0e8054e8b2b2cabdab18d3f (commit)
       via  0b7c39d9dcd44dfba0caf6e9353f00f47bbe7e9c (commit)
       via  d23827556ec500284bd155cdb731213343030f53 (commit)
      from  c0be7a6c0e12c78a7e02a2a3b3b259a3382b52bf (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 9b2b249d23576c999a65d8c338e008cabe45f0c9
Merge: c0be7a6c0e12c78a7e02a2a3b3b259a3382b52bf 45274924f3f25b70547809eeda5dbcbe230029b5
Author: Stephen Morris <stephen at isc.org>
Date:   Fri Dec 9 10:28:41 2011 +0000

    Merge branch 'master' into trac1383

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   71 +++-
 configure.ac                                       |  109 ++++--
 src/bin/auth/benchmarks/Makefile.am                |    2 +-
 src/bin/auth/query.cc                              |    9 +-
 src/bin/auth/query.h                               |    2 +-
 src/bin/auth/tests/query_unittest.cc               |   79 +++-
 src/bin/bind10/bind10_messages.mes                 |   11 +
 src/bin/bind10/bind10_src.py.in                    |  351 +++++++++++----
 src/bin/bind10/bob.spec                            |    4 +-
 src/bin/bind10/tests/bind10_test.py.in             |  472 +++++++++++++++++++-
 src/bin/dhcp6/.gitignore                           |    1 +
 src/bin/dhcp6/dhcp6_srv.cc                         |   30 +-
 src/bin/dhcp6/dhcp6_srv.h                          |   11 +-
 src/bin/dhcp6/iface_mgr.cc                         |  436 +++++++++++++------
 src/bin/dhcp6/iface_mgr.h                          |  269 ++++++++++--
 src/bin/dhcp6/tests/Makefile.am                    |    4 +-
 src/bin/dhcp6/tests/dhcp6_srv_unittest.cc          |   23 +-
 src/bin/dhcp6/tests/iface_mgr_unittest.cc          |  264 +++++++++---
 src/bin/resolver/tests/Makefile.am                 |    2 +-
 src/bin/xfrin/tests/Makefile.am                    |    1 +
 src/bin/xfrin/tests/xfrin_test.py                  |  332 ++++++++++++---
 src/bin/xfrin/xfrin.py.in                          |  337 ++++++++++-----
 src/bin/xfrin/xfrin_messages.mes                   |   69 +++-
 src/bin/xfrout/tests/xfrout_test.py.in             |  184 +++++++-
 src/bin/xfrout/xfrout.py.in                        |   58 ++-
 src/lib/asiolink/Makefile.am                       |    3 +
 src/lib/cryptolink/Makefile.am                     |    3 +-
 src/lib/cryptolink/tests/Makefile.am               |    4 +-
 src/lib/datasrc/Makefile.am                        |   13 +-
 src/lib/datasrc/database.cc                        |   12 +-
 .../datasrc_config.h.pre.in}                       |   28 +-
 src/lib/datasrc/factory.cc                         |   53 +++-
 src/lib/datasrc/factory.h                          |   11 +-
 src/lib/datasrc/tests/Makefile.am                  |   25 +-
 src/lib/datasrc/tests/factory_unittest.cc          |   65 +++
 src/lib/dhcp/Makefile.am                           |    1 +
 src/lib/dhcp/libdhcp.cc                            |   12 +-
 src/lib/dhcp/option.cc                             |   31 +--
 src/lib/dhcp/option.h                              |   27 +-
 src/lib/dhcp/option4_addrlst.cc                    |  135 ++++++
 src/lib/dhcp/option4_addrlst.h                     |  167 +++++++
 src/lib/dhcp/option6_addrlst.cc                    |    6 +-
 src/lib/dhcp/option6_addrlst.h                     |   19 +-
 src/lib/dhcp/option6_ia.cc                         |    6 +-
 src/lib/dhcp/option6_ia.h                          |    2 +-
 src/lib/dhcp/option6_iaaddr.cc                     |    4 +-
 src/lib/dhcp/option6_iaaddr.h                      |    3 +-
 src/lib/dhcp/pkt4.cc                               |   53 ++-
 src/lib/dhcp/pkt4.h                                |   24 +-
 src/lib/dhcp/tests/Makefile.am                     |    1 +
 src/lib/dhcp/tests/option4_addrlst_unittest.cc     |  273 +++++++++++
 src/lib/dhcp/tests/option_unittest.cc              |    2 +
 src/lib/dhcp/tests/pkt4_unittest.cc                |   17 +-
 src/lib/dns/Makefile.am                            |    3 +
 src/lib/dns/python/Makefile.am                     |    1 +
 src/lib/dns/python/pydnspp.cc                      |   17 +
 src/lib/dns/python/serial_python.cc                |  281 ++++++++++++
 .../python/{opcode_python.h => serial_python.h}    |   32 +-
 src/lib/dns/python/tests/Makefile.am               |    1 +
 src/lib/dns/python/tests/serial_python_test.py     |  111 +++++
 src/lib/dns/rdata/generic/soa_6.cc                 |    4 +-
 src/lib/dns/rdata/generic/soa_6.h                  |    3 +-
 src/lib/dns/serial.cc                              |   76 ++++
 src/lib/dns/serial.h                               |  155 +++++++
 src/lib/dns/tests/Makefile.am                      |    7 +-
 src/lib/dns/tests/rdata_soa_unittest.cc            |    2 +-
 src/lib/dns/tests/serial_unittest.cc               |  179 ++++++++
 src/lib/log/Makefile.am                            |    3 +-
 src/lib/log/tests/Makefile.am                      |   10 +-
 src/lib/python/isc/bind10/Makefile.am              |    3 +-
 src/lib/python/isc/bind10/socket_cache.py          |  302 +++++++++++++
 src/lib/python/isc/bind10/special_component.py     |   14 +-
 src/lib/python/isc/bind10/tests/Makefile.am        |    2 +-
 src/lib/python/isc/bind10/tests/component_test.py  |    4 -
 .../python/isc/bind10/tests/sockcreator_test.py    |    3 -
 .../python/isc/bind10/tests/socket_cache_test.py   |  396 ++++++++++++++++
 src/lib/python/isc/datasrc/Makefile.am             |    1 +
 src/lib/python/isc/datasrc/finder_inc.cc           |    5 +-
 src/lib/python/isc/datasrc/finder_python.cc        |   29 +-
 src/lib/python/isc/datasrc/tests/Makefile.am       |    1 +
 src/lib/python/isc/datasrc/tests/datasrc_test.py   |   48 ++
 src/lib/python/isc/notify/notify_out.py            |   15 +-
 src/lib/python/isc/notify/tests/Makefile.am        |    1 +
 src/lib/python/isc/testutils/rrset_utils.py        |   19 +
 src/lib/xfr/Makefile.am                            |    2 +
 src/lib/{cryptolink => xfr}/tests/Makefile.am      |   14 +-
 .../tempdir.h.in => xfr/tests/client_test.cc}      |   26 +-
 src/lib/{acl => xfr}/tests/run_unittests.cc        |    0 
 src/lib/xfr/xfrout_client.cc                       |    9 +-
 tests/lettuce/features/terrain/bind10_control.py   |    7 +-
 tests/lettuce/features/xfrin_bind10.feature        |    1 +
 91 files changed, 5073 insertions(+), 845 deletions(-)
 copy src/lib/{python/isc/datasrc/client_python.h => datasrc/datasrc_config.h.pre.in} (60%)
 create mode 100644 src/lib/dhcp/option4_addrlst.cc
 create mode 100644 src/lib/dhcp/option4_addrlst.h
 create mode 100644 src/lib/dhcp/tests/option4_addrlst_unittest.cc
 create mode 100644 src/lib/dns/python/serial_python.cc
 copy src/lib/dns/python/{opcode_python.h => serial_python.h} (66%)
 create mode 100644 src/lib/dns/python/tests/serial_python_test.py
 create mode 100644 src/lib/dns/serial.cc
 create mode 100644 src/lib/dns/serial.h
 create mode 100644 src/lib/dns/tests/serial_unittest.cc
 create mode 100644 src/lib/python/isc/bind10/socket_cache.py
 create mode 100644 src/lib/python/isc/bind10/tests/socket_cache_test.py
 copy src/lib/{cryptolink => xfr}/tests/Makefile.am (54%)
 copy src/lib/{log/tests/tempdir.h.in => xfr/tests/client_test.cc} (61%)
 copy src/lib/{acl => xfr}/tests/run_unittests.cc (100%)

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 7644588..e8f57ba 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,72 @@
+341.	[func]		tomek
+	libdhcp++: Support for handling both IPv4 and IPv6 added.
+	Also added support for binding IPv4 sockets.
+	(Trac #1238, git 86a4ce45115dab4d3978c36dd2dbe07edcac02ac)
+
+340.	[build]		jelte
+	Fixed several linker issues related to recent gcc versions, botan
+	and gtest.
+	(Trac #1442, git 91fb141bfb3aadfdf96f13e157a26636f6e9f9e3)
+
+339.	[bug]		jinmei
+	libxfr, used by b10-auth to share TCP sockets with b10-xfrout,
+	incorrectly propagated ASIO specific exceptions to the application
+	if the given file name was too long.  This could lead to
+	unexpected shut down of b10-auth.
+	(Trac #1387, git a5e9d9176e9c60ef20c0f5ef59eeb6838ed47ab2)
+
+338.	[bug]		jinmei
+	b10-xfrin didn't check SOA serials of SOA and IXFR responses,
+	which resulted in unnecessary transfer or unexpected IXFR
+	timeouts (these issues were not overlooked but deferred to be
+	fixed until #1278 was completed).  Validation on responses to SOA
+	queries was tightened, too.
+	(Trac #1299, git 6ff03bb9d631023175df99248e8cc0cda586c30a)
+
+337.	[func]		tomek
+	libdhcp++: Support for DHCPv4 option that can store a single
+	address or a list of IPv4 addresses added. Support for END option
+	added.
+	(Trac #1350, git cc20ff993da1ddb1c6e8a98370438b45a2be9e0a)
+
+336.	[func]		jelte
+	libdns++ (and its python wrapper) now includes a class Serial, for 
+	SOA SERIAL comparison and addition. Operations on instances of this 
+	class follow the specification from RFC 1982. 
+	Rdata::SOA::getSerial() now returns values of this type (and not 
+	uint32_t).
+	(Trac #1278, git 2ae72d76c74f61a67590722c73ebbf631388acbd)
+
+335.	[bug]*		jelte
+	The DataSourceClientContainer class that dynamically loads 
+	datasource backend libraries no longer provides just a .so file name 
+	to its call to dlopen(), but passes it an absolute path. This means 
+	that which file is chosen is no longer a system implementation detail
+	that depends on [DY]LD_LIBRARY_PATH, should there be multiple
+	options (for instance, when test-running a new build while a 
+	different version is installed).
+	These loadable libraries are also no longer installed in the default 
+	library path, but in a subdirectory of the libexec directory of the 
+	target ($prefix/libexec/[version]/backends).
+	This also removes the need to handle b10-xfrin and b10-xfrout as
+	'special' hardcoded components, and they are now started as regular 
+	components as dictated by the configuration of the boss process.
+	(Trac #1292, git 83ce13c2d85068a1bec015361e4ef8c35590a5d0)
+
+334.	[bug]		jinmei
+	b10-xfrout could potentially create an overflow response message
+	(exceeding the 64KB max) or could create unnecessarily small
+	messages.  The former was actually unlikely to happen due to the
+	effect of name compression, and the latter was marginal and at least
+	shouldn't cause an interoperability problem, but these were still
+	potential problems and were fixed.
+	(Trac #1389, git 3fdce88046bdad392bd89ea656ec4ac3c858ca2f)
+
+333.    [bug]		dvv
+	Solaris needs "-z now" to force non-lazy binding and prevent g++ static
+	initialization code from deadlocking.
+	(Trac #1439, git c789138250b33b6b08262425a08a2a0469d90433)
+
 332.    [bug]		vorner
 	C++ exceptions in the isc.dns.Rdata wrapper are now converted
 	to python ones instead of just aborting the interpreter.
@@ -33,7 +102,7 @@ bind10-devel-20111128 released on November 28, 2011
 	always respond to IXFR requests according to RFC1995).
 	(Trac #1371 and #1372, git 80c131f5b0763753d199b0fb9b51f10990bcd92b)
 
-326.	[build]*		jinmei
+326.	[build]*	jinmei
 	Added a check script for the SQLite3 schema version.  It will be
 	run at the beginning of 'make install', and if it detects an old
 	version of schema, installation will stop.  You'll then need to
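
ChangeLog entry 336 above introduces a Serial class for SOA SERIAL handling
(src/lib/dns/serial.{h,cc} and its Python wrapper in the diffstat).  The C++
class itself is not quoted in this mail; the following is a minimal Python
sketch, for illustration only, of the comparison and addition rules RFC 1982
defines for 32-bit serial numbers, which is the behaviour the entry describes.

class Serial:
    """Illustrative sketch of RFC 1982 serial arithmetic (SERIAL_BITS = 32).

    This is not the libdns++ Serial class added by this merge; it only
    demonstrates the semantics the ChangeLog entry refers to.
    """
    MAX = 2 ** 32
    HALF = 2 ** 31

    def __init__(self, value):
        self.value = value % self.MAX

    def __eq__(self, other):
        return self.value == other.value

    def __lt__(self, other):
        # RFC 1982 section 3.2: i1 < i2 when they differ and the forward
        # distance from i1 to i2 is less than 2^(SERIAL_BITS - 1).
        return self.value != other.value and (
            (self.value < other.value and other.value - self.value < self.HALF) or
            (self.value > other.value and self.value - other.value > self.HALF))

    def __add__(self, n):
        # Addition is only defined for 0 <= n <= 2^(SERIAL_BITS - 1) - 1.
        if not 0 <= n <= self.HALF - 1:
            raise ValueError("operand out of range for RFC 1982 addition")
        return Serial((self.value + n) % self.MAX)

# The serial space wraps: 4294967295 + 2 == 1, and 1 is the "newer" value.
assert Serial(4294967295) < Serial(4294967295) + 2

Note that the resulting order is partial: two serials exactly 2^31 apart
compare as neither less than nor greater than each other, a case RFC 1982
leaves undefined.
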
diff --git a/configure.ac b/configure.ac
index 26c1e34..e370e21 100644
--- a/configure.ac
+++ b/configure.ac
@@ -96,6 +96,8 @@ case "$host" in
 	# Solaris requires special definitions to get some standard libraries
 	# (e.g. getopt(3)) available with common used header files.
 	CPPFLAGS="$CPPFLAGS -D_XPG4_2 -D__EXTENSIONS__"
+	# "now" binding is necessary to prevent deadlocks in C++ static initialization code
+	LDFLAGS="$LDFLAGS -z now"
 	;;
 *-apple-darwin*)
 	# libtool doesn't work perfectly with Darwin: libtool embeds the
@@ -478,23 +480,33 @@ else
     fi
 fi
 
-BOTAN_LDFLAGS=`${BOTAN_CONFIG} --libs`
+BOTAN_LIBS=`${BOTAN_CONFIG} --libs`
 BOTAN_INCLUDES=`${BOTAN_CONFIG} --cflags`
 
 # We expect botan-config --libs to contain -L<path_to_libbotan>, but
 # this is not always the case.  As a heuristics workaround we add
-# -L`botan-config --prefix/lib` in this case.  Same for BOTAN_INCLUDES
-# (but using include instead of lib) below.
+# -L`botan-config --prefix/lib` in this case (if not present already).
+# Same for BOTAN_INCLUDES (but using include instead of lib) below.
 if [ $BOTAN_CONFIG --prefix >/dev/null 2>&1 ] ; then
-    echo ${BOTAN_LDFLAGS} | grep -- -L > /dev/null || \
-        BOTAN_LDFLAGS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LDFLAGS}"
+    echo ${BOTAN_LIBS} | grep -- -L > /dev/null || \
+        BOTAN_LIBS="-L`${BOTAN_CONFIG} --prefix`/lib ${BOTAN_LIBS}"
     echo ${BOTAN_INCLUDES} | grep -- -I > /dev/null || \
         BOTAN_INCLUDES="-I`${BOTAN_CONFIG} --prefix`/include ${BOTAN_INCLUDES}"
 fi
+
+# botan-config script (and the way we call pkg-config) returns -L and -l
+# as one string, but we need them in separate values
+BOTAN_LDFLAGS=
+BOTAN_NEWLIBS=
+for flag in ${BOTAN_LIBS}; do
+    BOTAN_LDFLAGS="${BOTAN_LDFLAGS} `echo $flag | sed -ne '/^\(\-L\)/p'`"
+    BOTAN_LIBS="${BOTAN_LIBS} `echo $flag | sed -ne '/^\(\-l\)/p'`"
+done
+
 # See python_rpath for some info on why we do this
 if test $rpath_available = yes; then
     BOTAN_RPATH=
-    for flag in ${BOTAN_LDFLAGS}; do
+    for flag in ${BOTAN_LIBS}; do
             BOTAN_RPATH="${BOTAN_RPATH} `echo $flag | sed -ne 's/^\(\-L\)/-R/p'`"
     done
 AC_SUBST(BOTAN_RPATH)
@@ -510,13 +522,13 @@ AC_SUBST(BOTAN_RPATH)
 fi
 
 AC_SUBST(BOTAN_LDFLAGS)
+AC_SUBST(BOTAN_LIBS)
 AC_SUBST(BOTAN_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$BOTAN_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$BOTAN_LDFLAGS $LDFLAGS"
-
+LIBS_SAVED="$LIBS"
+LIBS="$LIBS $BOTAN_LIBS"
 AC_CHECK_HEADERS([botan/botan.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
         [AC_LANG_PROGRAM([#include <botan/botan.h>
@@ -531,7 +543,7 @@ AC_LINK_IFELSE(
          AC_MSG_ERROR([Needs Botan library 1.8 or higher])]
 )
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 # Check for log4cplus
 log4cplus_path="yes"
@@ -543,7 +555,7 @@ if test "${log4cplus_path}" = "no" ; then
     AC_MSG_ERROR([Need log4cplus])
 elif test "${log4cplus_path}" != "yes" ; then
   LOG4CPLUS_INCLUDES="-I${log4cplus_path}/include"
-  LOG4CPLUS_LDFLAGS="-L${log4cplus_path}/lib"
+  LOG4CPLUS_LIBS="-L${log4cplus_path}/lib"
 else
 # If not specified, try some common paths.
 	log4cplusdirs="/usr/local /usr/pkg /opt /opt/local"
@@ -551,21 +563,21 @@ else
 	do
 		if test -f $d/include/log4cplus/logger.h; then
 			LOG4CPLUS_INCLUDES="-I$d/include"
-			LOG4CPLUS_LDFLAGS="-L$d/lib"
+			LOG4CPLUS_LIBS="-L$d/lib"
 			break
 		fi
 	done
 fi
 
-LOG4CPLUS_LDFLAGS="$LOG4CPLUS_LDFLAGS -llog4cplus $MULTITHREADING_FLAG"
+LOG4CPLUS_LIBS="$LOG4CPLUS_LIBS -llog4cplus $MULTITHREADING_FLAG"
 
-AC_SUBST(LOG4CPLUS_LDFLAGS)
+AC_SUBST(LOG4CPLUS_LIBS)
 AC_SUBST(LOG4CPLUS_INCLUDES)
 
 CPPFLAGS_SAVED=$CPPFLAGS
 CPPFLAGS="$LOG4CPLUS_INCLUDES $CPPFLAGS"
-LDFLAGS_SAVED="$LDFLAGS"
-LDFLAGS="$LOG4CPLUS_LDFLAGS $LDFLAGS"
+LIBS_SAVED="$LIBS"
+LIBS="$LOG4CPLUS_LIBS $LIBS"
 
 AC_CHECK_HEADERS([log4cplus/logger.h],,AC_MSG_ERROR([Missing required header files.]))
 AC_LINK_IFELSE(
@@ -580,7 +592,7 @@ AC_LINK_IFELSE(
 )
 
 CPPFLAGS=$CPPFLAGS_SAVED
-LDFLAGS=$LDFLAGS_SAVED
+LIBS=$LIBS_SAVED
 
 #
 # Configure Boost header path
@@ -673,6 +685,13 @@ else
     AM_CONDITIONAL(NEED_LIBBOOST_THREAD, test "${use_boost_threads}" = "yes")
 fi
 
+# I can't get some of the #include <asio.hpp> right without this
+# TODO: find the real cause of asio/boost wanting pthreads
+# (this currently only occurs for src/lib/cc/session_unittests)
+PTHREAD_LDFLAGS=
+AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
+AC_SUBST(PTHREAD_LDFLAGS)
+AC_SUBST(MULTITHREADING_FLAG)
 
 #
 # Check availability of gtest, which will be used for unit tests.
@@ -709,6 +728,48 @@ then
 				GTEST_LDFLAGS="-L$dir/lib"
 				GTEST_LDADD="-lgtest"
 				GTEST_FOUND="true"
+				# There is no gtest-config script on this
+				# system, which is supposed to inform us
+				# whether we need pthreads as well (a 
+				# gtest compile-time option). So we still
+				# need to test that manually.
+				CPPFLAGS_SAVED="$CPPFLAGS"
+				CPPFLAGS="$CPPFLAGS $GTEST_INCLUDES"
+				LDFLAGS_SAVED="$LDFLAGS"
+				LDFLAGS="$LDFLAGS $GTEST_LDFLAGS"
+				LIBS_SAVED=$LIBS
+				LIBS="$LIBS $GTEST_LDADD"
+				AC_MSG_CHECKING([Checking whether gtest tests need pthreads])
+				# First try to compile without pthreads
+				AC_TRY_LINK([
+					#include <gtest/gtest.h>
+					],[
+						int i = 0;
+						char* c = NULL;
+						::testing::InitGoogleTest(&i, &c);
+						return (0);
+					],
+					[ AC_MSG_RESULT(no) ],
+					[
+						LIBS="$SAVED_LIBS $GTEST_LDADD $PTHREAD_LDFLAGS"
+						# Now try to compile with pthreads
+						AC_TRY_LINK([
+							#include <gtest/gtest.h>
+							],[
+								int i = 0;
+								char* c = NULL;
+								::testing::InitGoogleTest(&i, &c);
+								return (0);
+							],
+							[ AC_MSG_RESULT(yes)
+							  GTEST_LDADD="$GTEST_LDADD $PTHREAD_LDFLAGS"
+							],
+							# Apparently we can't compile it at all
+							[ AC_MSG_ERROR(unable to compile with gtest) ])
+				])
+				CPPFLAGS=$CPPFLAGS_SAVED
+				LDFLAGS=$LDFLAGS_SAVED
+				LIBS=$LIBS_SAVED
 				break
 			fi
 		done
@@ -735,15 +796,6 @@ if test "x$HAVE_PKG_CONFIG" = "xno" ; then
 fi
 PKG_CHECK_MODULES(SQLITE, sqlite3 >= 3.3.9, enable_features="$enable_features SQLite3")
 
-# I can't get some of the #include <asio.hpp> right without this
-# TODO: find the real cause of asio/boost wanting pthreads
-# (this currently only occurs for src/lib/cc/session_unittests)
-PTHREAD_LDFLAGS=
-AC_CHECK_LIB(pthread, pthread_create,[ PTHREAD_LDFLAGS=-lpthread ], [])
-AC_SUBST(PTHREAD_LDFLAGS)
-
-AC_SUBST(MULTITHREADING_FLAG)
-
 #
 # ASIO: we extensively use it as the C++ event management module.
 #
@@ -910,6 +962,7 @@ AC_CONFIG_FILES([Makefile
                  src/lib/datasrc/tests/Makefile
                  src/lib/datasrc/tests/testdata/Makefile
                  src/lib/xfr/Makefile
+                 src/lib/xfr/tests/Makefile
                  src/lib/log/Makefile
                  src/lib/log/compiler/Makefile
                  src/lib/log/tests/Makefile
@@ -990,6 +1043,7 @@ AC_OUTPUT([doc/version.ent
            src/lib/python/bind10_config.py
            src/lib/cc/session_config.h.pre
            src/lib/cc/tests/session_unittests_config.h
+           src/lib/datasrc/datasrc_config.h.pre
            src/lib/log/tests/console_test.sh
            src/lib/log/tests/destination_test.sh
            src/lib/log/tests/init_logger_test.sh
@@ -1084,8 +1138,9 @@ dnl includes too
   Boost:         ${BOOST_INCLUDES}
   Botan:         ${BOTAN_INCLUDES}
                  ${BOTAN_LDFLAGS}
+                 ${BOTAN_LIBS}
   Log4cplus:     ${LOG4CPLUS_INCLUDES}
-                 ${LOG4CPLUS_LDFLAGS}
+                 ${LOG4CPLUS_LIBS}
   SQLite:        $SQLITE_CFLAGS
                  $SQLITE_LIBS
 
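
The configure.ac hunks above separate the single string returned by
`botan-config --libs` into -L (search path) and -l (library) options, because
the former belongs in LDFLAGS and the latter in LIBS.  As a rough sketch of
that intent only (the build itself does this with the sed loop shown above),
the same split could be written in Python as:

def split_linker_flags(libs_output):
    """Separate a `botan-config --libs`-style string into -L path options
    and -l library options.  Illustrative only."""
    flags = libs_output.split()
    ldflags = [f for f in flags if f.startswith("-L")]
    libs = [f for f in flags if f.startswith("-l")]
    return " ".join(ldflags), " ".join(libs)

# split_linker_flags("-L/usr/local/lib -lbotan -lm")
# -> ("-L/usr/local/lib", "-lbotan -lm")
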
diff --git a/src/bin/auth/benchmarks/Makefile.am b/src/bin/auth/benchmarks/Makefile.am
index 53c019f..dd00ea5 100644
--- a/src/bin/auth/benchmarks/Makefile.am
+++ b/src/bin/auth/benchmarks/Makefile.am
@@ -32,8 +32,8 @@ query_bench_LDADD += $(top_builddir)/src/lib/cc/libcc.la
 query_bench_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 query_bench_LDADD += $(top_builddir)/src/lib/log/liblog.la
 query_bench_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 query_bench_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 query_bench_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+query_bench_LDADD += $(top_builddir)/src/lib/asiodns/libasiodns.la
 query_bench_LDADD += $(SQLITE_LIBS)
 
diff --git a/src/bin/auth/query.cc b/src/bin/auth/query.cc
index b7ee3b6..f159262 100644
--- a/src/bin/auth/query.cc
+++ b/src/bin/auth/query.cc
@@ -117,7 +117,6 @@ void
 Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (nsec->getRdataCount() == 0) {
         isc_throw(BadNSEC, "NSEC for NXDOMAIN is empty");
-        return;
     }
 
     // Add the NSEC proving NXDOMAIN to the authority section.
@@ -152,7 +151,6 @@ Query::addNXDOMAINProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard NXDOMAIN proof");
-        return;
     }
 
     // Add the (no-) wildcard proof only when it's different from the NSEC
@@ -178,7 +176,6 @@ Query::addWildcardProof(ZoneFinder& finder) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for wildcard proof");
-        return;
     }
     response_.addRRset(Message::SECTION_AUTHORITY,
                        boost::const_pointer_cast<RRset>(fresult.rrset),
@@ -186,12 +183,11 @@ Query::addWildcardProof(ZoneFinder& finder) {
 }
 
 void
-Query::addWildcardNxrrsetProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
+Query::addWildcardNXRRSETProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     // There should be one NSEC RR which was found in the zone to prove
     // that there is not matched <QNAME,QTYPE> via wildcard expansion.
     if (nsec->getRdataCount() == 0) {
         isc_throw(BadNSEC, "NSEC for WILDCARD_NXRRSET is empty");
-        return;
     }
     // Add this NSEC RR to authority section.
     response_.addRRset(Message::SECTION_AUTHORITY,
@@ -203,7 +199,6 @@ Query::addWildcardNxrrsetProof(ZoneFinder& finder, ConstRRsetPtr nsec) {
     if (fresult.code != ZoneFinder::NXDOMAIN || !fresult.rrset ||
         fresult.rrset->getRdataCount() == 0) {
         isc_throw(BadNSEC, "Unexpected result for no match QNAME proof");
-        return;
     }
    
     if (nsec->getName() != fresult.rrset->getName()) {
@@ -387,7 +382,7 @@ Query::process() {
             case ZoneFinder::WILDCARD_NXRRSET:
                 addSOA(*result.zone_finder);
                 if (dnssec_ && db_result.rrset) {
-                    addWildcardNxrrsetProof(zfinder,db_result.rrset);
+                    addWildcardNXRRSETProof(zfinder, db_result.rrset);
                 }
                 break;
             default:
diff --git a/src/bin/auth/query.h b/src/bin/auth/query.h
index 681feb2..43a8b6b 100644
--- a/src/bin/auth/query.h
+++ b/src/bin/auth/query.h
@@ -91,7 +91,7 @@ private:
     /// query is to be found.
     /// \param nsec The RRset (NSEC RR) which proved that there is no matched 
     /// <QNAME,QTTYPE>.
-    void addWildcardNxrrsetProof(isc::datasrc::ZoneFinder& finder,
+    void addWildcardNXRRSETProof(isc::datasrc::ZoneFinder& finder,
                                  isc::dns::ConstRRsetPtr nsec);
     
     /// \brief Look up additional data (i.e., address records for the names
diff --git a/src/bin/auth/tests/query_unittest.cc b/src/bin/auth/tests/query_unittest.cc
index 43a2077..14067ab 100644
--- a/src/bin/auth/tests/query_unittest.cc
+++ b/src/bin/auth/tests/query_unittest.cc
@@ -109,6 +109,13 @@ const char* const wild_txt_next =
     "www.uwild.example.com. 3600 IN A 192.0.2.11\n";
 const char* const nsec_wild_txt_next =
     "www.uwild.example.com. 3600 IN NSEC *.wild.example.com. A NSEC RRSIG\n";
+// Wildcard empty
+const char* const empty_txt = "b.*.t.example.com. 3600 IN A 192.0.2.13\n";
+const char* const nsec_empty_txt =
+    "b.*.t.example.com. 3600 IN NSEC *.uwild.example.com. A NSEC RRSIG\n";
+const char* const empty_prev_txt = "t.example.com. 3600 IN A 192.0.2.15\n";
+const char* const nsec_empty_prev_txt =
+    "t.example.com. 3600 IN NSEC b.*.t.example.com. A NSEC RRSIG\n";
 // Used in NXDOMAIN proof test.  We are going to test some unusual case where
 // the best possible wildcard is below the "next domain" of the NSEC RR that
 // proves the NXDOMAIN, i.e.,
@@ -188,8 +195,9 @@ public:
             nsec_apex_txt << nsec_mx_txt << nsec_no_txt << nsec_nz_txt <<
             nsec_nxdomain_txt << nsec_www_txt << nonsec_a_txt <<
             wild_txt << nsec_wild_txt << cnamewild_txt << nsec_cnamewild_txt <<
-            wild_txt_nxrrset<<nsec_wild_txt_nxrrset<<wild_txt_next<<
-            nsec_wild_txt_next;
+            wild_txt_nxrrset << nsec_wild_txt_nxrrset << wild_txt_next <<
+            nsec_wild_txt_next << empty_txt << nsec_empty_txt <<
+            empty_prev_txt << nsec_empty_prev_txt;
 
         masterLoad(zone_stream, origin_, rrclass_,
                    boost::bind(&MockZoneFinder::loadRRset, this, _1));
@@ -407,24 +415,45 @@ MockZoneFinder::find(const Name& name, const RRType& type,
     // due to the existence of closer name.
     if ((options & NO_WILDCARD) == 0) {
         const Name wild_suffix(name.split(1));
+        // Unit tests use these domains for the wildcard tests.
         if (name.equals(Name("www.wild.example.com"))||
-           name.equals(Name("www1.uwild.example.com"))) {
+           name.equals(Name("www1.uwild.example.com"))||
+           name.equals(Name("a.t.example.com"))) {
             if (name.compare(wild_suffix).getRelation() ==
                 NameComparisonResult::SUBDOMAIN) {
                 domain = domains_.find(Name("*").concatenate(wild_suffix));
-                assert(domain != domains_.end());
-                RRsetStore::const_iterator found_rrset = domain->second.find(type);
-                if (found_rrset != domain->second.end()) {
+                // Matched the QNAME
+                if (domain != domains_.end()) {
+                   RRsetStore::const_iterator found_rrset =
+                       domain->second.find(type);
+                   // Matched the QTYPE
+                   if(found_rrset != domain->second.end()) {
                     return (FindResult(WILDCARD,
                             substituteWild(*found_rrset->second, name)));
-                } else {
-                    found_rrset = domain->second.find(RRType::NSEC());
-                    assert(found_rrset != domain->second.end());
-                    Name newName = Name("*").concatenate(wild_suffix);
-                    return (FindResult(WILDCARD_NXRRSET,
+                   } else {
+                   // No matched QTYPE, this case is for WILDCARD_NXRRSET
+                     found_rrset = domain->second.find(RRType::NSEC());
+                     assert(found_rrset != domain->second.end());
+                     Name newName = Name("*").concatenate(wild_suffix);
+                     return (FindResult(WILDCARD_NXRRSET,
                            substituteWild(*found_rrset->second,newName)));
+                   }
+                 } else {
+                    // This is the empty non-terminal name case under a wildcard.
+                    Name emptyName = Name("*").concatenate(wild_suffix);
+                    for (Domains::reverse_iterator it = domains_.rbegin();
+                        it != domains_.rend();
+                        ++it) {
+                            RRsetStore::const_iterator nsec_it;
+                            if ((*it).first < emptyName &&
+                            (nsec_it = (*it).second.find(RRType::NSEC()))
+                            != (*it).second.end()) {
+                                return (FindResult(WILDCARD_NXRRSET,
+                                                   (*nsec_it).second));
+                            }
+                        }
                 }
-
+                return (FindResult(WILDCARD_NXRRSET,RRsetPtr()));
              }
         }
         const Name cnamewild_suffix("cnamewild.example.com");
@@ -955,7 +984,7 @@ TEST_F(QueryTest, wildcardNxrrsetWithDuplicateNSEC) {
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 4, 0, NULL,
                   (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
                    getCommonRRSIGText("SOA") + "\n" +
-                   string(nsec_wild_txt) + 
+                   string(nsec_wild_txt) +
                    string("*.wild.example.com. 3600 IN RRSIG ") +
                    getCommonRRSIGText("NSEC")+"\n").c_str(),
                   NULL, mock_finder->getOrigin());
@@ -967,11 +996,11 @@ TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
     // one proves NXDOMAIN and the other proves non existence RRSETs of wildcard.
     Query(memory_client, Name("www1.uwild.example.com"), RRType::TXT(), response,
           true).process();
-    
+
     responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
                   (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
                    getCommonRRSIGText("SOA") + "\n" +
-                   string(nsec_wild_txt_nxrrset) + 
+                   string(nsec_wild_txt_nxrrset) +
                    string("*.uwild.example.com. 3600 IN RRSIG ") +
                    getCommonRRSIGText("NSEC")+"\n" +
                    string(nsec_wild_txt_next) +
@@ -979,6 +1008,26 @@ TEST_F(QueryTest, wildcardNxrrsetWithNSEC) {
                    getCommonRRSIGText("NSEC") + "\n").c_str(),
                   NULL, mock_finder->getOrigin());
 }
+
+TEST_F(QueryTest, wildcardEmptyWithNSEC) {
+    // WILDCARD_EMPTY with DNSSEC proof.  We should have the SOA, NSECs and
+    // their RRSIGs. In this case we need two NSEC RRs: one proving the
+    // NXDOMAIN and the other proving the non-existence of the wildcard.
+    Query(memory_client, Name("a.t.example.com"), RRType::A(), response,
+          true).process();
+
+    responseCheck(response, Rcode::NOERROR(), AA_FLAG, 0, 6, 0, NULL,
+                  (string(soa_txt) + string("example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("SOA") + "\n" +
+                   string(nsec_empty_prev_txt) +
+                   string("t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n" +
+                   string(nsec_empty_txt) +
+                   string("b.*.t.example.com. 3600 IN RRSIG ") +
+                   getCommonRRSIGText("NSEC")+"\n").c_str(),
+                  NULL, mock_finder->getOrigin());
+}
+
 /*
  * This tests that when there's no SOA and we need a negative answer. It should
  * throw in that case.
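
The new MockZoneFinder branch above covers a wildcard that exists only as an
empty non-terminal: for a query of a.t.example.com, the name *.t.example.com
has no RRsets of its own (it exists only because b.*.t.example.com does), so
the finder walks the domain map backwards and returns WILDCARD_NXRRSET with
the NSEC of the closest name ordered before the wildcard.  A rough Python
sketch of that lookup, assuming map keys that sort in DNSSEC canonical order
as isc::dns::Name does in the real test, might be:

def covering_nsec(domains, wildcard_name):
    """Sketch of the empty-non-terminal branch added to MockZoneFinder::find().
    `domains` maps an owner name to a dict of its RRsets keyed by type.
    Returns the NSEC of the closest name ordered before the (empty) wildcard
    name, or None if there is none.  Illustrative only."""
    for name in sorted(domains, reverse=True):
        if name < wildcard_name and "NSEC" in domains[name]:
            return domains[name]["NSEC"]
    return None

In the wildcardEmptyWithNSEC test this yields the NSEC owned by t.example.com;
the second NSEC in the expected answer (owned by b.*.t.example.com) is added by
Query's separate no-wildcard proof for the query name, which is why the test
checks six authority records.
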
diff --git a/src/bin/bind10/bind10_messages.mes b/src/bin/bind10/bind10_messages.mes
index d850e47..79635fd 100644
--- a/src/bin/bind10/bind10_messages.mes
+++ b/src/bin/bind10/bind10_messages.mes
@@ -99,6 +99,12 @@ The boss module is sending a kill signal to process with the given name,
 as part of the process of killing all started processes during a failed
 startup, as described for BIND10_KILLING_ALL_PROCESSES
 
+% BIND10_LOST_SOCKET_CONSUMER consumer %1 of sockets disconnected, considering all its sockets closed
+A connection from one of the applications which requested a socket was
+closed. This means the application has terminated, so all the sockets it was
+using are now closed and the bind10 process can release them as well, unless
+the same sockets are used by yet another application.
+
 % BIND10_MSGQ_ALREADY_RUNNING msgq daemon already running, cannot start
 There already appears to be a message bus daemon running. Either an
 old process was not shut down correctly, and needs to be killed, or
@@ -110,6 +116,11 @@ While listening on the message bus channel for messages, it suddenly
 disappeared. The msgq daemon may have died. This might lead to an
 inconsistent state of the system, and BIND 10 will now shut down.
 
+% BIND10_NO_SOCKET couldn't send a socket for token %1 because of error: %2
+An error occurred when the bind10 process was asked to send a socket file
+descriptor. The error is mentioned; the most common reason is that the request
+is invalid and may not have come from a bind10 process at all.
+
 % BIND10_PROCESS_ENDED process %2 of %1 ended with status %3
 This indicates a process started previously terminated. The process id
 and component owning the process are indicated, as well as the exit code.
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index 13cd3e3..00858d8 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -72,6 +72,9 @@ import isc.log
 from isc.log_messages.bind10_messages import *
 import isc.bind10.component
 import isc.bind10.special_component
+import isc.bind10.socket_cache
+import libutil_io_python
+import tempfile
 
 isc.log.init("b10-boss")
 logger = isc.log.Logger("boss")
@@ -81,6 +84,10 @@ logger = isc.log.Logger("boss")
 DBG_PROCESS = logger.DBGLVL_TRACE_BASIC
 DBG_COMMANDS = logger.DBGLVL_TRACE_DETAIL
 
+# Messages sent over the unix domain socket to indicate whether a real socket follows
+CREATOR_SOCKET_OK = "1\n"
+CREATOR_SOCKET_UNAVAILABLE = "0\n"
+
 # Assign this process some longer name
 isc.util.process.rename(sys.argv[0])
 
@@ -241,6 +248,12 @@ class BoB:
         # If -v was set, enable full debug logging.
         if self.verbose:
             logger.set_severity("DEBUG", 99)
+        # This is set in init_socket_srv
+        self._socket_path = None
+        self._socket_cache = None
+        self._tmpdir = None
+        self._srv_socket = None
+        self._unix_sockets = {}
 
     def __propagate_component_config(self, config):
         comps = dict(config)
@@ -315,6 +328,18 @@ class BoB:
             elif command == "show_processes":
                 answer = isc.config.ccsession. \
                     create_answer(0, self.get_processes())
+            elif command == "get_socket":
+                answer = self._get_socket(args)
+            elif command == "drop_socket":
+                if "token" not in args:
+                    answer = isc.config.ccsession. \
+                        create_answer(1, "Missing token parameter")
+                else:
+                    try:
+                        self._socket_cache.drop_socket(args["token"])
+                        answer = isc.config.ccsession.create_answer(0)
+                    except Exception as e:
+                        answer = isc.config.ccsession.create_answer(1, str(e))
             else:
                 answer = isc.config.ccsession.create_answer(1,
                                                             "Unknown command")
@@ -574,33 +599,6 @@ class BoB:
         # ... and start
         return self.start_process("b10-resolver", resargs, self.c_channel_env)
 
-    def __ld_path_hack(self):
-        # XXX: a quick-hack workaround.  xfrin/out will implicitly use
-        # dynamically loadable data source modules, which will be installed in
-        # $(libdir).
-        # On some OSes (including MacOS X and *BSDs) the main process (python)
-        # cannot find the modules unless they are located in a common shared
-        # object path or a path in the (DY)LD_LIBRARY_PATH.  We should seek
-        # a cleaner solution, but for a short term workaround we specify the
-        # path here, unconditionally, and without even bothering which
-        # environment variable should be used.
-        #
-        # We reuse the ADD_LIBEXEC_PATH variable to see whether we need to
-        # do this, as the conditions that make this workaround needed are
-        # the same as for the libexec path addition
-        # TODO: Once #1292 is finished, remove this method and the special
-        # component, use it as normal component.
-        env = dict(self.c_channel_env)
-        if ADD_LIBEXEC_PATH:
-            cur_path = os.getenv('DYLD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-
-            cur_path = os.getenv('LD_LIBRARY_PATH')
-            cur_path = '' if cur_path is None else ':' + cur_path
-            env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
-        return env
-
     def start_cmdctl(self):
         """
             Starts the command control process
@@ -613,22 +611,6 @@ class BoB:
         return self.start_process("b10-cmdctl", args, self.c_channel_env,
                                   self.cmdctl_port)
 
-    def start_xfrin(self):
-        # Set up the command arguments.
-        args = ['b10-xfrin']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrin", args, self.__ld_path_hack())
-
-    def start_xfrout(self):
-        # Set up the command arguments.
-        args = ['b10-xfrout']
-        if self.verbose:
-            args += ['-v']
-
-        return self.start_process("b10-xfrout", args, self.__ld_path_hack())
-
     def start_all_components(self):
         """
             Starts up all the components.  Any exception generated during the
@@ -812,6 +794,209 @@ class BoB:
 
         return next_restart_time
 
+    def _get_socket(self, args):
+        """
+        Implementation of the get_socket CC command. It asks the cache
+        to provide the token and sends the information back.
+        """
+        try:
+            try:
+                addr = isc.net.parse.addr_parse(args['address'])
+                port = isc.net.parse.port_parse(args['port'])
+                protocol = args['protocol']
+                if protocol not in ['UDP', 'TCP']:
+                    raise ValueError("Protocol must be either UDP or TCP")
+                share_mode = args['share_mode']
+                if share_mode not in ['ANY', 'SAMEAPP', 'NO']:
+                    raise ValueError("Share mode must be one of ANY, SAMEAPP" +
+                                     " or NO")
+                share_name = args['share_name']
+            except KeyError as ke:
+                return \
+                    isc.config.ccsession.create_answer(1,
+                                                       "Missing parameter " +
+                                                       str(ke))
+
+            # FIXME: This call contains blocking IPC. It is expected to be
+            # short, but if it turns out to be problem, we'll need to do
+            # something about it.
+            token = self._socket_cache.get_token(protocol, addr, port,
+                                                 share_mode, share_name)
+            return isc.config.ccsession.create_answer(0, {
+                'token': token,
+                'path': self._socket_path
+            })
+        except Exception as e:
+            return isc.config.ccsession.create_answer(1, str(e))
+
+    def socket_request_handler(self, token, unix_socket):
+        """
+        This function handles a token that comes over a unix_domain socket.
+        The function looks into the _socket_cache and sends the socket
+        identified by the token back over the unix_socket.
+        """
+        try:
+            fd = self._socket_cache.get_socket(token, unix_socket.fileno())
+            # FIXME: These two calls are blocking in their nature. An OS-level
+            # buffer is likely to be large enough to hold all these data, but
+            # if it wasn't and the remote application got stuck, we would have
+            # a problem. If there appear such problems, we should do something
+            # about it.
+            unix_socket.sendall(CREATOR_SOCKET_OK)
+            libutil_io_python.send_fd(unix_socket.fileno(), fd)
+        except Exception as e:
+            logger.info(BIND10_NO_SOCKET, token, e)
+            unix_socket.sendall(CREATOR_SOCKET_UNAVAILABLE)
+
+    def socket_consumer_dead(self, unix_socket):
+        """
+        This function handles when a unix_socket closes. This means all
+        sockets sent to it are to be considered closed. This function signals
+        so to the _socket_cache.
+        """
+        logger.info(BIND10_LOST_SOCKET_CONSUMER, unix_socket.fileno())
+        try:
+            self._socket_cache.drop_application(unix_socket.fileno())
+        except ValueError:
+            # This means the application holds no sockets. It's harmless, as it
+            # can happen in real life - for example, it requests a socket, but
+            # get_socket doesn't find it, so the application dies. It should be
+            # rare, though.
+            pass
+
+    def set_creator(self, creator):
+        """
+        Registers a socket creator with the boss. The socket creator is not
+        used directly, but through a cache. The cache is created in this
+        method.
+
+        If called more than once, it raises a ValueError.
+        """
+        if self._socket_cache is not None:
+            raise ValueError("A creator was inserted previously")
+        self._socket_cache = isc.bind10.socket_cache.Cache(creator)
+
+    def init_socket_srv(self):
+        """
+        Creates and listens on a unix-domain socket to be able to send out
+        the sockets.
+
+        This method should be called after switching user, or the switched
+        applications won't be able to access the socket.
+        """
+        self._srv_socket = socket.socket(socket.AF_UNIX)
+        # We create a temporary directory somewhere safe and unique, to avoid
+        # the need to find the place ourselves or bother users. Also, this
+        # secures the socket on some platforms, as it creates a private
+        # directory.
+        self._tmpdir = tempfile.mkdtemp()
+        # Get the name
+        self._socket_path = os.path.join(self._tmpdir, "sockcreator")
+        # And bind the socket to the name
+        self._srv_socket.bind(self._socket_path)
+        self._srv_socket.listen(5)
+
+    def remove_socket_srv(self):
+        """
+        Closes and removes the listening socket and the directory where it
+        lives, as we created both.
+
+        It does nothing if the _srv_socket is not set (eg. it was not yet
+        initialized).
+        """
+        if self._srv_socket is not None:
+            self._srv_socket.close()
+            os.remove(self._socket_path)
+            os.rmdir(self._tmpdir)
+
+    def _srv_accept(self):
+        """
+        Accept a socket from the unix domain socket server and put it to the
+        others we care about.
+        """
+        (sock, _) = self._srv_socket.accept()
+        self._unix_sockets[sock.fileno()] = (sock, b'')
+
+    def _socket_data(self, socket_fileno):
+        """
+        This is called when a socket identified by the socket_fileno needs
+        attention. We try to read data from there. If it is closed, we remove
+        it.
+        """
+        (sock, previous) = self._unix_sockets[socket_fileno]
+        while True:
+            try:
+                data = sock.recv(1, socket.MSG_DONTWAIT)
+            except socket.error as se:
+                # These two might be different on some systems
+                if se.errno == errno.EAGAIN or se.errno == errno.EWOULDBLOCK:
+                    # No more data now. Oh, well, just store what we have.
+                    self._unix_sockets[socket_fileno] = (sock, previous)
+                    return
+                else:
+                    data = b'' # Pretend it got closed
+            if len(data) == 0: # The socket got to its end
+                del self._unix_sockets[socket_fileno]
+                self.socket_consumer_dead(sock)
+                sock.close()
+                return
+            else:
+                if data == b"\n":
+                    # Handle this token and clear it
+                    self.socket_request_handler(previous, sock)
+                    previous = b''
+                else:
+                    previous += data
+
+    def run(self, wakeup_fd):
+        """
+        The main loop, waiting for sockets, commands and dead processes.
+        Runs as long as the runnable is true.
+
+        The wakeup_fd descriptor is the read end of pipe where CHLD signal
+        handler writes.
+        """
+        ccs_fd = self.ccs.get_socket().fileno()
+        while self.runnable:
+            # clean up any processes that exited
+            self.reap_children()
+            next_restart = self.restart_processes()
+            if next_restart is None:
+                wait_time = None
+            else:
+                wait_time = max(next_restart - time.time(), 0)
+
+            # select() can raise EINTR when a signal arrives,
+            # even if they are resumable, so we have to catch
+            # the exception
+            try:
+                (rlist, wlist, xlist) = \
+                    select.select([wakeup_fd, ccs_fd,
+                                   self._srv_socket.fileno()] +
+                                   list(self._unix_sockets.keys()), [], [],
+                                  wait_time)
+            except select.error as err:
+                if err.args[0] == errno.EINTR:
+                    (rlist, wlist, xlist) = ([], [], [])
+                else:
+                    logger.fatal(BIND10_SELECT_ERROR, err)
+                    break
+
+            for fd in rlist + xlist:
+                if fd == ccs_fd:
+                    try:
+                        self.ccs.check_command()
+                    except isc.cc.session.ProtocolError:
+                        logger.fatal(BIND10_MSGQ_DISAPPEARED)
+                        self.runnable = False
+                        break
+                elif fd == wakeup_fd:
+                    os.read(wakeup_fd, 32)
+                elif fd == self._srv_socket.fileno():
+                    self._srv_accept()
+                elif fd in self._unix_sockets:
+                    self._socket_data(fd)
+
 # global variables, needed for signal handlers
 options = None
 boss_of_bind = None
@@ -974,60 +1159,32 @@ def main():
     # Block SIGPIPE, as we don't want it to end this process
     signal.signal(signal.SIGPIPE, signal.SIG_IGN)
 
-    # Go bob!
-    boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
-                       options.config_file, options.nocache, options.verbose,
-                       setuid, username, options.cmdctl_port,
-                       options.wait_time)
-    startup_result = boss_of_bind.startup()
-    if startup_result:
-        logger.fatal(BIND10_STARTUP_ERROR, startup_result)
-        sys.exit(1)
-    logger.info(BIND10_STARTUP_COMPLETE)
-    dump_pid(options.pid_file)
-
-    # In our main loop, we check for dead processes or messages 
-    # on the c-channel.
-    wakeup_fd = wakeup_pipe[0]
-    ccs_fd = boss_of_bind.ccs.get_socket().fileno()
-    while boss_of_bind.runnable:
-        # clean up any processes that exited
-        boss_of_bind.reap_children()
-        next_restart = boss_of_bind.restart_processes()
-        if next_restart is None:
-            wait_time = None
-        else:
-            wait_time = max(next_restart - time.time(), 0)
-
-        # select() can raise EINTR when a signal arrives, 
-        # even if they are resumable, so we have to catch
-        # the exception
-        try:
-            (rlist, wlist, xlist) = select.select([wakeup_fd, ccs_fd], [], [], 
-                                                  wait_time)
-        except select.error as err:
-            if err.args[0] == errno.EINTR:
-                (rlist, wlist, xlist) = ([], [], [])
-            else:
-                logger.fatal(BIND10_SELECT_ERROR, err)
-                break
-
-        for fd in rlist + xlist:
-            if fd == ccs_fd:
-                try:
-                    boss_of_bind.ccs.check_command()
-                except isc.cc.session.ProtocolError:
-                    logger.fatal(BIND10_MSGQ_DISAPPEARED)
-                    self.runnable = False
-                    break
-            elif fd == wakeup_fd:
-                os.read(wakeup_fd, 32)
-
-    # shutdown
-    signal.signal(signal.SIGCHLD, signal.SIG_DFL)
-    boss_of_bind.shutdown()
-    unlink_pid_file(options.pid_file)
-    sys.exit(0)
+    try:
+        # Go bob!
+        boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
+                           options.config_file, options.nocache,
+                           options.verbose, setuid, username,
+                           options.cmdctl_port, options.wait_time)
+        startup_result = boss_of_bind.startup()
+        if startup_result:
+            logger.fatal(BIND10_STARTUP_ERROR, startup_result)
+            sys.exit(1)
+        boss_of_bind.init_socket_srv()
+        logger.info(BIND10_STARTUP_COMPLETE)
+        dump_pid(options.pid_file)
+
+        # Let it run
+        boss_of_bind.run(wakeup_pipe[0])
+
+        # shutdown
+        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
+        boss_of_bind.shutdown()
+    finally:
+        # Clean up the filesystem
+        unlink_pid_file(options.pid_file)
+        if boss_of_bind is not None:
+            boss_of_bind.remove_socket_srv()
+    sys.exit(boss_of_bind.exitcode)
 
 if __name__ == "__main__":
     main()
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 4267b70..adc9798 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -14,8 +14,8 @@
             "priority": 5,
             "kind": "dispensable"
           },
-          "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
-          "b10-xfrout": { "special": "xfrout", "kind": "dispensable" },
+          "b10-xfrin": { "address": "Xfrin", "kind": "dispensable" },
+          "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
           "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
           "b10-stats": { "address": "Stats", "kind": "dispensable" },
           "b10-stats-httpd": {
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index b7b741c..f9537fd 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -13,7 +13,11 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
+# Most of the time, we omit the "bind10_src" prefix for brevity. Sometimes,
+# we want to be explicit about what we do, like when hijacking a library
+# call used by the bind10_src.
 from bind10_src import ProcessInfo, BoB, parse_args, dump_pid, unlink_pid_file, _BASETIME
+import bind10_src
 
 # XXX: environment tests are currently disabled, due to the preprocessor
 #      setup that we have now complicating the environment
@@ -28,6 +32,8 @@ from isc.net.addr import IPAddr
 import time
 import isc
 import isc.log
+import isc.bind10.socket_cache
+import errno
 
 from isc.testutils.parse_args import TestOptParser, OptsError
 
@@ -97,6 +103,232 @@ class TestProcessInfo(unittest.TestCase):
         self.assertTrue(type(pi.pid) is int)
         self.assertNotEqual(pi.pid, old_pid)
 
+class TestCacheCommands(unittest.TestCase):
+    """
+    Test methods of boss related to the socket cache and socket handling.
+    """
+    def setUp(self):
+        """
+        Prepare the boss for some tests.
+
+        Also prepare some variables we need.
+        """
+        self.__boss = BoB()
+        # Fake the cache here so we can pretend it is us and hijack the
+        # calls to its methods.
+        self.__boss._socket_cache = self
+        self.__boss._socket_path = '/socket/path'
+        self.__raise_exception = None
+        self.__socket_args = {
+            "port": 53,
+            "address": "::",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # What was and wasn't called.
+        self.__drop_app_called = None
+        self.__get_socket_called = None
+        self.__send_fd_called = None
+        self.__get_token_called = None
+        self.__drop_socket_called = None
+        bind10_src.libutil_io_python.send_fd = self.__send_fd
+
+    def __send_fd(self, to, socket):
+        """
+        A function to hook the send_fd in the bind10_src.
+        """
+        self.__send_fd_called = (to, socket)
+
+    class FalseSocket:
+        """
+        A socket where we can fake methods we need instead of having a real
+        socket.
+        """
+        def __init__(self):
+            self.send = ""
+        def fileno(self):
+            """
+            The file number. Used for identifying the remote application.
+            """
+            return 42
+
+        def sendall(self, data):
+            """
+            Adds data to the self.send.
+            """
+            self.send += data
+
+    def drop_application(self, application):
+        """
+        Part of pretending to be the cache. Logs the parameter to
+        self.__drop_app_called.
+
+        In the case self.__raise_exception is set, the exception there
+        is raised instead.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_app_called = application
+
+    def test_consumer_dead(self):
+        """
+        Test that it calls the drop_application method of the cache.
+        """
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+        self.assertEqual(42, self.__drop_app_called)
+
+    def test_consumer_dead_invalid(self):
+        """
+        Test that the boss doesn't crash when the application is not known
+        to the cache, as this can actually happen in practice.
+        """
+        self.__raise_exception = ValueError("This application is unknown")
+        # This doesn't crash
+        self.__boss.socket_consumer_dead(self.FalseSocket())
+
+    def get_socket(self, token, application):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the call is logged
+        into __get_socket_called and a number is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_socket_called = (token, application)
+        return 13
+
+    def test_request_handler(self):
+        """
+        Test that a request for a socket is forwarded to the cache and the
+        socket is sent back, if the cache returns one.
+        """
+        socket = self.FalseSocket()
+        # An exception from the cache
+        self.__raise_exception = ValueError("Test value error")
+        self.__boss.socket_request_handler("token", socket)
+        # It was called, but it threw, so it is not noted here
+        self.assertIsNone(self.__get_socket_called)
+        self.assertEqual("0\n", socket.send)
+        # It should not have sent any socket.
+        self.assertIsNone(self.__send_fd_called)
+        # Now prepare a valid scenario
+        self.__raise_exception = None
+        socket.send = ""
+        self.__boss.socket_request_handler("token", socket)
+        self.assertEqual("1\n", socket.send)
+        self.assertEqual((42, 13), self.__send_fd_called)
+        self.assertEqual(("token", 42), self.__get_socket_called)
+
+    def get_token(self, protocol, address, port, share_mode, share_name):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameters are
+        logged into __get_token_called and a token is returned.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__get_token_called = (protocol, address, port, share_mode,
+                                   share_name)
+        return "token"
+
+    def test_get_socket_ok(self):
+        """
+        Test the successful scenario of getting a socket.
+        """
+        result = self.__boss._get_socket(self.__socket_args)
+        [code, answer] = result['result']
+        self.assertEqual(0, code)
+        self.assertEqual({
+            'token': 'token',
+            'path': '/socket/path'
+        }, answer)
+        addr = self.__get_token_called[1]
+        self.assertTrue(isinstance(addr, IPAddr))
+        self.assertEqual("::", str(addr))
+        self.assertEqual(("UDP", addr, 53, "ANY", "app"),
+                         self.__get_token_called)
+
+    def test_get_socket_error(self):
+        """
+        Test that bad inputs are handled correctly, etc.
+        """
+        def check_code(code, args):
+            """
+            Pass the args there and check if it returns success or not.
+
+            The rest is not tested, as it is already checked in the
+            test_get_socket_ok.
+            """
+            [rcode, ranswer] = self.__boss._get_socket(args)['result']
+            self.assertEqual(code, rcode)
+            if code == 1:
+                # This should be an error message. The exact formatting
+                # is unknown, but we check it is a string at least
+                self.assertTrue(isinstance(ranswer, str))
+        def mod_args(name, value):
+            """
+            Override a parameter in the args.
+            """
+            result = dict(self.__socket_args)
+            result[name] = value
+            return result
+
+        # Port too large
+        check_code(1, mod_args('port', 65536))
+        # Not numeric address
+        check_code(1, mod_args('address', 'example.org.'))
+        # Some bad values of enum-like params
+        check_code(1, mod_args('protocol', 'BAD PROTO'))
+        check_code(1, mod_args('share_mode', 'BAD SHARE'))
+        # Check missing parameters
+        for param in self.__socket_args.keys():
+            args = dict(self.__socket_args)
+            del args[param]
+            check_code(1, args)
+        # These are OK values for the enum-like parameters
+        # The ones from test_get_socket_ok are not tested here
+        check_code(0, mod_args('protocol', 'TCP'))
+        check_code(0, mod_args('share_mode', 'SAMEAPP'))
+        check_code(0, mod_args('share_mode', 'NO'))
+        # If an exception is raised from within the cache, it is converted
+        # to an error, not propagated
+        self.__raise_exception = Exception("Test exception")
+        check_code(1, self.__socket_args)
+
+    def drop_socket(self, token):
+        """
+        Part of pretending to be the cache. If there's anything in
+        __raise_exception, it is raised. Otherwise, the parameter is stored
+        in __drop_socket_called.
+        """
+        if self.__raise_exception is not None:
+            raise self.__raise_exception
+        self.__drop_socket_called = token
+
+    def test_drop_socket(self):
+        """
+        Check the drop_socket command. It should directly call the method
+        on the cache. Exceptions should be translated to error messages.
+        """
+        # This should be OK and just propagated to the call.
+        self.assertEqual({"result": [0]},
+                         self.__boss.command_handler("drop_socket",
+                                                     {"token": "token"}))
+        self.assertEqual("token", self.__drop_socket_called)
+        self.__drop_socket_called = None
+        # Missing parameter
+        self.assertEqual({"result": [1, "Missing token parameter"]},
+                         self.__boss.command_handler("drop_socket", {}))
+        self.assertIsNone(self.__drop_socket_called)
+        # An exception is raised from within the cache
+        self.__raise_exception = ValueError("Test error")
+        self.assertEqual({"result": [1, "Test error"]},
+                         self.__boss.command_handler("drop_socket",
+                         {"token": "token"}))
+
+
 class TestBoB(unittest.TestCase):
     def test_init(self):
         bob = BoB()
@@ -109,6 +341,22 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.uid, None)
         self.assertEqual(bob.username, None)
         self.assertEqual(bob.nocache, False)
+        self.assertIsNone(bob._socket_cache)
+
+    def test_set_creator(self):
+        """
+        Test the call to set_creator. The first time, the cache is created
+        with the passed creator. The second time, it throws an exception.
+        """
+        bob = BoB()
+        # The cache doesn't use it at start, so just create an empty class
+        class Creator: pass
+        creator = Creator()
+        bob.set_creator(creator)
+        self.assertTrue(isinstance(bob._socket_cache,
+                        isc.bind10.socket_cache.Cache))
+        self.assertEqual(creator, bob._socket_cache._creator)
+        self.assertRaises(ValueError, bob.set_creator, creator)
 
     def test_init_alternate_socket(self):
         bob = BoB("alt_socket_file")
@@ -183,6 +431,26 @@ class TestBoB(unittest.TestCase):
         self.assertEqual(bob.command_handler("__UNKNOWN__", None),
                          isc.config.ccsession.create_answer(1, "Unknown command"))
 
+        # Fake the get_token of cache and test the command works
+        bob._socket_path = '/socket/path'
+        class cache:
+            def get_token(self, protocol, addr, port, share_mode, share_name):
+                return str(addr) + ':' + str(port)
+        bob._socket_cache = cache()
+        args = {
+            "port": 53,
+            "address": "0.0.0.0",
+            "protocol": "UDP",
+            "share_mode": "ANY",
+            "share_name": "app"
+        }
+        # The fake cache returns "address:port" as the token, so checking the
+        # answer shows the parameters were passed through at all, and this is
+        # the easiest way to check.
+        self.assertEqual({'result': [0, {'token': '0.0.0.0:53',
+                                         'path': '/socket/path'}]},
+                         bob.command_handler("get_socket", args))
+        # The drop_socket is not tested here, but in TestCacheCommands.
+        # It needs the cache mocks to be in place and they are there.
+
 # Class for testing the BoB without actually starting processes.
 # This is used for testing the start/stop components routines and
 # the BoB commands.
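
To put the assertions above in context, here is a rough sketch of how a component
could drive the get_socket command and use its answer; it mirrors the shapes asserted
in the tests (the function name and the direct call to command_handler are
simplifications made for the sketch, a real caller would go through the CC session):

    def fetch_dns_socket(boss):
        # 'boss' is assumed to be a configured BoB instance.
        args = {
            "port": 53,
            "address": "0.0.0.0",
            "protocol": "UDP",
            "share_mode": "ANY",
            "share_name": "app",
        }
        code, payload = boss.command_handler("get_socket", args)["result"]
        if code != 0:
            # On errors the payload is a human-readable message.
            raise RuntimeError(payload)
        # On success the payload carries the token and the UNIX socket path
        # from which the descriptor itself can then be fetched.
        return payload["token"], payload["path"]
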
@@ -268,7 +536,9 @@ class MockBob(BoB):
                     'b10-stats-httpd': self.start_stats_httpd,
                     'b10-cmdctl': self.start_cmdctl,
                     'b10-dhcp6': self.start_dhcp6,
-                    'b10-dhcp4': self.start_dhcp4 }
+                    'b10-dhcp4': self.start_dhcp4,
+                    'b10-xfrin': self.start_xfrin,
+                    'b10-xfrout': self.start_xfrout }
         return procmap[name]()
 
     def start_xfrout(self):
@@ -463,8 +733,9 @@ class TestStartStopProcessesBob(unittest.TestCase):
         if start_auth:
             config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
             config['b10-xfrout'] = { 'kind': 'dispensable',
-                                     'special': 'xfrout' }
-            config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
+                                     'address': 'Xfrout' }
+            config['b10-xfrin'] = { 'kind': 'dispensable',
+                                    'address': 'Xfrin' }
             config['b10-zonemgr'] = { 'kind': 'dispensable',
                                       'address': 'Zonemgr' }
         if start_resolver:
@@ -928,6 +1199,201 @@ class TestBossComponents(unittest.TestCase):
         bob.start_all_components()
         self.__check_extended(self.__param)
 
+class SocketSrvTest(unittest.TestCase):
+    """
+    This tests some methods of boss related to the unix domain sockets used
+    to transfer other sockets to applications.
+    """
+    def setUp(self):
+        """
+        Create the boss to test, testdata and backup some functions.
+        """
+        self.__boss = BoB()
+        self.__select_backup = bind10_src.select.select
+        self.__select_called = None
+        self.__socket_data_called = None
+        self.__consumer_dead_called = None
+        self.__socket_request_handler_called = None
+
+    def tearDown(self):
+        """
+        Restore functions.
+        """
+        bind10_src.select.select = self.__select_backup
+
+    class __FalseSocket:
+        """
+        A mock socket for the select and accept and stuff like that.
+        """
+        def __init__(self, owner, fileno=42):
+            self.__owner = owner
+            self.__fileno = fileno
+            self.data = None
+            self.closed = False
+
+        def fileno(self):
+            return self.__fileno
+
+        def accept(self):
+            return self.__class__(self.__owner, 13)
+
+        def recv(self, bufsize, flags=0):
+            self.__owner.assertEqual(1, bufsize)
+            self.__owner.assertEqual(socket.MSG_DONTWAIT, flags)
+            if isinstance(self.data, socket.error):
+                raise self.data
+            elif self.data is not None:
+                if len(self.data):
+                    result = self.data[0:1]
+                    self.data = self.data[1:]
+                    return result
+                else:
+                    raise socket.error(errno.EAGAIN, "Would block")
+            else:
+                return b''
+
+        def close(self):
+            self.closed = True
+
+    class __CCS:
+        """
+        A mock CCS, just to provide the socket file number.
+        """
+        class __Socket:
+            def fileno(self):
+                return 1
+        def get_socket(self):
+            return self.__Socket()
+
+    def __select_accept(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([42], [], [])
+
+    def __select_data(self, r, w, x, t):
+        self.__select_called = (r, w, x, t)
+        return ([13], [], [])
+
+    def __accept(self):
+        """
+        Hijack the accept method of the boss.
+
+        Notes down it was called and stops the boss.
+        """
+        self.__accept_called = True
+        self.__boss.runnable = False
+
+    def test_srv_accept_called(self):
+        """
+        Test that the _srv_accept method of boss is called when the listening
+        socket is readable.
+        """
+        self.__boss.runnable = True
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept = self.__accept
+        self.__boss.ccs = self.__CCS()
+        bind10_src.select.select = self.__select_accept
+        self.__boss.run(2)
+        # It called the accept
+        self.assertTrue(self.__accept_called)
+        # And the select had the right parameters
+        self.assertEqual(([2, 1, 42], [], [], None), self.__select_called)
+
+    def test_srv_accept(self):
+        """
+        Test how the _srv_accept method works.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._srv_accept()
+        # After we accepted, a new socket is added there
+        socket = self.__boss._unix_sockets[13][0]
+        # The socket is properly stored there
+        self.assertTrue(isinstance(socket, self.__FalseSocket))
+        # And the buffer (yet empty) is there
+        self.assertEqual({13: (socket, b'')}, self.__boss._unix_sockets)
+
+    def __socket_data(self, socket):
+        self.__boss.runnable = False
+        self.__socket_data_called = socket
+
+    def test_socket_data(self):
+        """
+        Test that a socket that wants attention gets it.
+        """
+        self.__boss._srv_socket = self.__FalseSocket(self)
+        self.__boss._socket_data = self.__socket_data
+        self.__boss.ccs = self.__CCS()
+        self.__boss._unix_sockets = {13: (self.__FalseSocket(self, 13), b'')}
+        self.__boss.runnable = True
+        bind10_src.select.select = self.__select_data
+        self.__boss.run(2)
+        self.assertEqual(13, self.__socket_data_called)
+        self.assertEqual(([2, 1, 42, 13], [], [], None), self.__select_called)
+
+    def __prepare_data(self, data):
+        socket = self.__FalseSocket(self, 13)
+        self.__boss._unix_sockets = {13: (socket, b'')}
+        socket.data = data
+        self.__boss.socket_consumer_dead = self.__consumer_dead
+        self.__boss.socket_request_handler = self.__socket_request_handler
+        return socket
+
+    def __consumer_dead(self, socket):
+        self.__consumer_dead_called = socket
+
+    def __socket_request_handler(self, token, socket):
+        self.__socket_request_handler_called = (token, socket)
+
+    def test_socket_closed(self):
+        """
+        Test that a socket is removed and the socket_consumer_dead is called
+        when it is closed.
+        """
+        socket = self.__prepare_data(None)
+        self.__boss._socket_data(13)
+        self.assertEqual(socket, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(socket.closed)
+
+    def test_socket_short(self):
+        """
+        Test that if there's not enough data to form the whole token, the
+        partial data is kept, but nothing is called.
+        """
+        socket = self.__prepare_data(b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'tok')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertIsNone(self.__socket_request_handler_called)
+
+    def test_socket_continue(self):
+        """
+        Test that we call the token handling function when the whole token
+        comes. This test pretends to continue reading where the previous one
+        stopped.
+        """
+        socket = self.__prepare_data(b"en\nanothe")
+        # The data to finish
+        self.__boss._unix_sockets[13] = (socket, b'tok')
+        self.__boss._socket_data(13)
+        self.assertEqual({13: (socket, b'anothe')}, self.__boss._unix_sockets)
+        self.assertFalse(socket.closed)
+        self.assertIsNone(self.__consumer_dead_called)
+        self.assertEqual((b'token', socket),
+                         self.__socket_request_handler_called)
+
+    def test_broken_socket(self):
+        """
+        If the socket raises an exception during the read other than EAGAIN,
+        it is broken and we remove it.
+        """
+        sock = self.__prepare_data(socket.error(errno.ENOMEM,
+            "There's more memory available, but not for you"))
+        self.__boss._socket_data(13)
+        self.assertEqual(sock, self.__consumer_dead_called)
+        self.assertEqual({}, self.__boss._unix_sockets)
+        self.assertTrue(sock.closed)
+
 if __name__ == '__main__':
     # store os.environ for test_unchanged_environment
     original_os_environ = copy.deepcopy(os.environ)
diff --git a/src/bin/dhcp6/.gitignore b/src/bin/dhcp6/.gitignore
index 6a6060b..e4e8f2d 100644
--- a/src/bin/dhcp6/.gitignore
+++ b/src/bin/dhcp6/.gitignore
@@ -7,3 +7,4 @@ Makefile.in
 b10-dhcp6
 spec_config.h
 spec_config.h.pre
+tests/dhcp6_unittests
diff --git a/src/bin/dhcp6/dhcp6_srv.cc b/src/bin/dhcp6/dhcp6_srv.cc
index ba5afec..d5a969f 100644
--- a/src/bin/dhcp6/dhcp6_srv.cc
+++ b/src/bin/dhcp6/dhcp6_srv.cc
@@ -12,26 +12,32 @@
 // OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 // PERFORMANCE OF THIS SOFTWARE.
 
-#include "dhcp/dhcp6.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
-#include "dhcp6/dhcp6_srv.h"
-#include "dhcp/option6_ia.h"
-#include "dhcp/option6_iaaddr.h"
-#include "asiolink/io_address.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp6/dhcp6_srv.h>
+#include <dhcp/option6_ia.h>
+#include <dhcp/option6_iaaddr.h>
+#include <asiolink/io_address.h>
+#include <exceptions/exceptions.h>
 
 using namespace std;
 using namespace isc;
 using namespace isc::dhcp;
 using namespace isc::asiolink;
 
-Dhcpv6Srv::Dhcpv6Srv() {
+Dhcpv6Srv::Dhcpv6Srv(uint16_t port) {
+
     cout << "Initialization" << endl;
 
-    // first call to instance() will create IfaceMgr (it's a singleton)
-    // it may throw something if things go wrong
+    // First call to instance() will create IfaceMgr (it's a singleton).
+    // It may throw something if things go wrong.
     IfaceMgr::instance();
 
+    // Now try to open IPv6 sockets on detected interfaces.
+    IfaceMgr::instance().openSockets(port);
+
     /// @todo: instantiate LeaseMgr here once it is implemented.
 
     setServerID();
@@ -41,6 +47,8 @@ Dhcpv6Srv::Dhcpv6Srv() {
 
 Dhcpv6Srv::~Dhcpv6Srv() {
     cout << "DHCPv6 Srv shutdown." << endl;
+
+    IfaceMgr::instance().closeSockets();
 }
 
 bool
@@ -49,7 +57,7 @@ Dhcpv6Srv::run() {
         boost::shared_ptr<Pkt6> query; // client's message
         boost::shared_ptr<Pkt6> rsp;   // server's response
 
-        query = IfaceMgr::instance().receive();
+        query = IfaceMgr::instance().receive6();
 
         if (query) {
             if (!query->unpack()) {
diff --git a/src/bin/dhcp6/dhcp6_srv.h b/src/bin/dhcp6/dhcp6_srv.h
index 4daef3a..bcc7818 100644
--- a/src/bin/dhcp6/dhcp6_srv.h
+++ b/src/bin/dhcp6/dhcp6_srv.h
@@ -17,8 +17,9 @@
 
 #include <boost/shared_ptr.hpp>
 #include <boost/noncopyable.hpp>
-#include "dhcp/pkt6.h"
-#include "dhcp/option.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp/pkt6.h>
+#include <dhcp/option.h>
 #include <iostream>
 
 namespace isc {
@@ -41,10 +42,12 @@ public:
     /// In particular, creates IfaceMgr that will be responsible for
     /// network interaction. Will instantiate lease manager, and load
     /// old or create new DUID.
-    Dhcpv6Srv();
+    ///
+    /// @param port port on which all sockets will listen
+    Dhcpv6Srv(uint16_t port = DHCP6_SERVER_PORT);
 
     /// @brief Destructor. Used during DHCPv6 service shutdown.
-    ~Dhcpv6Srv();
+    virtual ~Dhcpv6Srv();
 
     /// @brief Returns server-identifier option
     ///
diff --git a/src/bin/dhcp6/iface_mgr.cc b/src/bin/dhcp6/iface_mgr.cc
index a96db07..60dac63 100644
--- a/src/bin/dhcp6/iface_mgr.cc
+++ b/src/bin/dhcp6/iface_mgr.cc
@@ -18,9 +18,9 @@
 #include <netinet/in.h>
 #include <arpa/inet.h>
 
-#include "dhcp/dhcp6.h"
-#include "dhcp6/iface_mgr.h"
-#include "exceptions/exceptions.h"
+#include <dhcp/dhcp6.h>
+#include <dhcp6/iface_mgr.h>
+#include <exceptions/exceptions.h>
 
 using namespace std;
 using namespace isc;
@@ -79,6 +79,30 @@ IfaceMgr::Iface::getPlainMac() const {
     return (tmp.str());
 }
 
+bool IfaceMgr::Iface::delAddress(const isc::asiolink::IOAddress& addr) {
+
+    // Let's delete all addresses that match. It really shouldn't matter
+    // if we delete the first one or all of them, as the OS should allow a
+    // single address to be added to an interface only once. If the OS allows
+    // multiple instances of the same address to be added, we are in deep
+    // trouble anyway.
+    size_t size = addrs_.size();
+    addrs_.erase(remove(addrs_.begin(), addrs_.end(), addr), addrs_.end());
+    return (addrs_.size() < size);
+}
+
+bool IfaceMgr::Iface::delSocket(uint16_t sockfd) {
+    list<SocketInfo>::iterator sock = sockets_.begin();
+    while (sock!=sockets_.end()) {
+        if (sock->sockfd_ == sockfd) {
+            close(sockfd);
+            sockets_.erase(sock);
+            return (true); //socket found
+        }
+        ++sock;
+    }
+    return (false); // socket not found
+}
+
 IfaceMgr::IfaceMgr()
     :control_buf_len_(CMSG_SPACE(sizeof(struct in6_pktinfo))),
      control_buf_(new char[control_buf_len_])
@@ -95,9 +119,6 @@ IfaceMgr::IfaceMgr()
 
         detectIfaces();
 
-        if (!openSockets()) {
-            isc_throw(Unexpected, "Failed to open/bind sockets.");
-        }
     } catch (const std::exception& ex) {
         cout << "IfaceMgr creation failed:" << ex.what() << endl;
 
@@ -109,7 +130,23 @@ IfaceMgr::IfaceMgr()
     }
 }
 
+void IfaceMgr::closeSockets() {
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+
+        for (SocketCollection::iterator sock = iface->sockets_.begin();
+             sock != iface->sockets_.end(); ++sock) {
+            cout << "Closing socket " << sock->sockfd_ << endl;
+            close(sock->sockfd_);
+        }
+        iface->sockets_.clear();
+    }
+
+}
+
 IfaceMgr::~IfaceMgr() {
+    closeSockets();
+
     // control_buf_ is deleted automatically (scoped_ptr)
     control_buf_len_ = 0;
 }
@@ -139,8 +176,8 @@ IfaceMgr::detectIfaces() {
 
         Iface iface(ifaceName, if_nametoindex( ifaceName.c_str() ) );
         IOAddress addr(linkLocal);
-        iface.addrs_.push_back(addr);
-        ifaces_.push_back(iface);
+        iface.addAddress(addr);
+        addInterface(iface);
         interfaces.close();
     } catch (const std::exception& ex) {
         // TODO: deallocate whatever memory we used
@@ -154,51 +191,55 @@ IfaceMgr::detectIfaces() {
     }
 }
 
-bool
-IfaceMgr::openSockets() {
-    int sock;
+void
+IfaceMgr::openSockets(uint16_t port) {
+    int sock1, sock2;
+
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
 
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
+        AddressCollection addrs = iface->getAddresses();
 
-        for (Addr6Lst::iterator addr=iface->addrs_.begin();
-             addr!=iface->addrs_.end();
+        for (AddressCollection::iterator addr = addrs.begin();
+             addr != addrs.end();
              ++addr) {
 
-            sock = openSocket(iface->name_, *addr,
-                              DHCP6_SERVER_PORT);
-            if (sock<0) {
-                cout << "Failed to open unicast socket." << endl;
-                return (false);
+            sock1 = openSocket(iface->getName(), *addr, port);
+            if (sock1 < 0) {
+                isc_throw(Unexpected, "Failed to open unicast socket on "
+                          << " interface " << iface->getFullName());
             }
-            sendsock_ = sock;
-
-            sock = openSocket(iface->name_,
-                              IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
-                              DHCP6_SERVER_PORT);
-            if (sock<0) {
-                cout << "Failed to open multicast socket." << endl;
-                close(sendsock_);
-                return (false);
+
+            if ( !joinMcast(sock1, iface->getName(),
+                             string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
+                close(sock1);
+                isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+                          << " multicast group.");
+            }
+
+            // this doesn't work too well on NetBSD
+            sock2 = openSocket(iface->getName(),
+                               IOAddress(ALL_DHCP_RELAY_AGENTS_AND_SERVERS),
+                               port);
+            if (sock2 < 0) {
+                iface->delSocket(sock1); // delete previously opened socket
+                isc_throw(Unexpected, "Failed to open multicast socket on "
+                          << "interface " << iface->getFullName());
             }
-            recvsock_ = sock;
         }
     }
-
-    return (true);
 }
 
 void
 IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
-    for (IfaceLst::const_iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
+    for (IfaceCollection::const_iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
         out << "Detected interface " << iface->getFullName() << endl;
-        out << "  " << iface->addrs_.size() << " addr(s):" << endl;
-        for (Addr6Lst::const_iterator addr=iface->addrs_.begin();
-             addr != iface->addrs_.end();
-             ++addr) {
+        out << "  " << iface->getAddresses().size() << " addr(s):" << endl;
+        const AddressCollection addrs = iface->getAddresses();
+
+        for (AddressCollection::const_iterator addr = addrs.begin();
+             addr != addrs.end(); ++addr) {
             out << "  " << addr->toText() << endl;
         }
         out << "  mac: " << iface->getPlainMac() << endl;
@@ -207,11 +248,11 @@ IfaceMgr::printIfaces(std::ostream& out /*= std::cout*/) {
 
 IfaceMgr::Iface*
 IfaceMgr::getIface(int ifindex) {
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
-        if (iface->ifindex_ == ifindex)
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+        if (iface->getIndex() == ifindex) {
             return (&(*iface));
+        }
     }
 
     return (NULL); // not found
@@ -219,29 +260,87 @@ IfaceMgr::getIface(int ifindex) {
 
 IfaceMgr::Iface*
 IfaceMgr::getIface(const std::string& ifname) {
-    for (IfaceLst::iterator iface=ifaces_.begin();
-         iface!=ifaces_.end();
-         ++iface) {
-        if (iface->name_ == ifname)
+    for (IfaceCollection::iterator iface = ifaces_.begin();
+         iface != ifaces_.end(); ++iface) {
+        if (iface->getName() == ifname) {
             return (&(*iface));
+        }
     }
 
     return (NULL); // not found
 }
 
 int
-IfaceMgr::openSocket(const std::string& ifname,
-                     const IOAddress& addr,
+IfaceMgr::openSocket(const std::string& ifname, const IOAddress& addr,
                      int port) {
-    struct sockaddr_in6 addr6;
+    Iface* iface = getIface(ifname);
+    if (!iface) {
+        isc_throw(BadValue, "There is no " << ifname << " interface present.");
+    }
+    switch (addr.getFamily()) {
+    case AF_INET:
+        return openSocket4(*iface, addr, port);
+    case AF_INET6:
+        return openSocket6(*iface, addr, port);
+    default:
+        isc_throw(BadValue, "Failed to detect family of address: "
+                  << addr.toText());
+    }
+}
+
+int
+IfaceMgr::openSocket4(Iface& iface, const IOAddress& addr, int port) {
+
+    cout << "Creating UDP4 socket on " << iface.getFullName()
+         << " " << addr.toText() << "/port=" << port << endl;
+
+    struct sockaddr_in addr4;
+    memset(&addr4, 0, sizeof(sockaddr));
+    addr4.sin_family = AF_INET;
+    addr4.sin_port = htons(port);
+    memcpy(&addr4.sin_addr, addr.getAddress().to_v4().to_bytes().data(),
+           sizeof(addr4.sin_addr));
+
+    int sock = socket(AF_INET, SOCK_DGRAM, 0);
+    if (sock < 0) {
+        isc_throw(Unexpected, "Failed to create UDP6 socket.");
+    }
+
+    if (bind(sock, (struct sockaddr *)&addr4, sizeof(addr4)) < 0) {
+        close(sock);
+        isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+                  << "/port=" << port);
+    }
+
+    // If there is no support for IP_PKTINFO, we are really out of luck.
+    // It will be difficult to understand where this packet came from.
+#if defined(IP_PKTINFO)
+    int flag = 1;
+    if (setsockopt(sock, IPPROTO_IP, IP_PKTINFO, &flag, sizeof(flag)) != 0) {
+        close(sock);
+        isc_throw(Unexpected, "setsockopt: IP_PKTINFO: failed.");
+    }
+#endif
+
+    cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
+        addr.toText() << "/port=" << port << endl;
 
-    cout << "Creating socket on " << ifname << "/" << addr.toText()
-         << "/port=" << port << endl;
+    iface.addSocket(SocketInfo(sock, addr, port));
 
+    return (sock);
+}
+
+int
+IfaceMgr::openSocket6(Iface& iface, const IOAddress& addr, int port) {
+
+    cout << "Creating UDP6 socket on " << iface.getFullName()
+         << " " << addr.toText() << "/port=" << port << endl;
+
+    struct sockaddr_in6 addr6;
     memset(&addr6, 0, sizeof(addr6));
     addr6.sin6_family = AF_INET6;
     addr6.sin6_port = htons(port);
-    addr6.sin6_scope_id = if_nametoindex(ifname.c_str());
+    addr6.sin6_scope_id = if_nametoindex(iface.getName().c_str());
 
     memcpy(&addr6.sin6_addr,
            addr.getAddress().to_v6().to_bytes().data(),
@@ -255,61 +354,58 @@ IfaceMgr::openSocket(const std::string& ifname,
     // make a socket
     int sock = socket(AF_INET6, SOCK_DGRAM, 0);
     if (sock < 0) {
-        cout << "Failed to create UDP6 socket." << endl;
-        return (-1);
+        isc_throw(Unexpected, "Failed to create UDP6 socket.");
     }
 
-    /* Set the REUSEADDR option so that we don't fail to start if
-       we're being restarted. */
+    // Set the REUSEADDR option so that we don't fail to start if
+    // we're being restarted.
     int flag = 1;
     if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                    (char *)&flag, sizeof(flag)) < 0) {
-        cout << "Can't set SO_REUSEADDR option on dhcpv6 socket." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "Can't set SO_REUSEADDR option on dhcpv6 socket.");
     }
 
     if (bind(sock, (struct sockaddr *)&addr6, sizeof(addr6)) < 0) {
-        cout << "Failed to bind socket " << sock << " to " << addr.toText()
-             << "/port=" << port << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "Failed to bind socket " << sock << " to " << addr.toText()
+                  << "/port=" << port);
     }
 #ifdef IPV6_RECVPKTINFO
-    /* RFC3542 - a new way */
+    // RFC3542 - a new way
     if (setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
                    &flag, sizeof(flag)) != 0) {
-        cout << "setsockopt: IPV6_RECVPKTINFO failed." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "setsockopt: IPV6_RECVPKTINFO failed.");
     }
 #else
-    /* RFC2292 - an old way */
+    // RFC2292 - an old way
     if (setsockopt(sock, IPPROTO_IPV6, IPV6_PKTINFO,
                    &flag, sizeof(flag)) != 0) {
-        cout << "setsockopt: IPV6_PKTINFO: failed." << endl;
         close(sock);
-        return (-1);
+        isc_throw(Unexpected, "setsockopt: IPV6_PKTINFO: failed.");
     }
 #endif
 
     // multicast stuff
-
     if (addr.getAddress().to_v6().is_multicast()) {
         // both mcast (ALL_DHCP_RELAY_AGENTS_AND_SERVERS and ALL_DHCP_SERVERS)
         // are link and site-scoped, so there is no sense to join those groups
         // with global addresses.
 
-        if ( !joinMcast( sock, ifname,
+        if ( !joinMcast( sock, iface.getName(),
                          string(ALL_DHCP_RELAY_AGENTS_AND_SERVERS) ) ) {
             close(sock);
-            return (-1);
+            isc_throw(Unexpected, "Failed to join " << ALL_DHCP_RELAY_AGENTS_AND_SERVERS
+                      << " multicast group.");
         }
     }
 
-    cout << "Created socket " << sock << " on " << ifname << "/" <<
+    cout << "Created socket " << sock << " on " << iface.getName() << "/" <<
         addr.toText() << "/port=" << port << endl;
 
+    iface.addSocket(SocketInfo(sock, addr, port));
+
     return (sock);
 }
 
@@ -345,16 +441,19 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     int result;
     struct in6_pktinfo *pktinfo;
     struct cmsghdr *cmsg;
+
+    Iface* iface = getIface(pkt->iface_);
+    if (!iface) {
+        isc_throw(BadValue, "Unable to send Pkt6. Invalid interface ("
+                  << pkt->iface_ << ") specified.");
+    }
+
     memset(&control_buf_[0], 0, control_buf_len_);
 
-    /*
-     * Initialize our message header structure.
-     */
+    // Initialize our message header structure.
     memset(&m, 0, sizeof(m));
 
-    /*
-     * Set the target address we're sending to.
-     */
+    // Set the target address we're sending to.
     sockaddr_in6 to;
     memset(&to, 0, sizeof(to));
     to.sin6_family = AF_INET6;
@@ -367,24 +466,20 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     m.msg_name = &to;
     m.msg_namelen = sizeof(to);
 
-    /*
-     * Set the data buffer we're sending. (Using this wacky
-     * "scatter-gather" stuff... we only have a single chunk
-     * of data to send, so we declare a single vector entry.)
-     */
+    // Set the data buffer we're sending. (Using this wacky
+    // "scatter-gather" stuff... we only have a single chunk
+    // of data to send, so we declare a single vector entry.)
     v.iov_base = (char *) &pkt->data_[0];
     v.iov_len = pkt->data_len_;
     m.msg_iov = &v;
     m.msg_iovlen = 1;
 
-    /*
-     * Setting the interface is a bit more involved.
-     *
-     * We have to create a "control message", and set that to
-     * define the IPv6 packet information. We could set the
-     * source address if we wanted, but we can safely let the
-     * kernel decide what that should be.
-     */
+    // Setting the interface is a bit more involved.
+    //
+    // We have to create a "control message", and set that to
+    // define the IPv6 packet information. We could set the
+    // source address if we wanted, but we can safely let the
+    // kernel decide what that should be.
     m.msg_control = &control_buf_[0];
     m.msg_controllen = control_buf_len_;
     cmsg = CMSG_FIRSTHDR(&m);
@@ -396,14 +491,12 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     pktinfo->ipi6_ifindex = pkt->ifindex_;
     m.msg_controllen = cmsg->cmsg_len;
 
-    result = sendmsg(sendsock_, &m, 0);
+    result = sendmsg(getSocket(*pkt), &m, 0);
     if (result < 0) {
         cout << "Send packet failed." << endl;
     }
-    cout << "Sent " << result << " bytes." << endl;
-
-    cout << "Sent " << pkt->data_len_ << " bytes over "
-         << pkt->iface_ << "/" << pkt->ifindex_ << " interface: "
+    cout << "Sent " << pkt->data_len_ << " bytes over socket " << getSocket(*pkt)
+         << " on " << iface->getFullName() << " interface: "
          << " dst=" << pkt->remote_addr_.toText()
          << ", src=" << pkt->local_addr_.toText()
          << endl;
@@ -411,8 +504,24 @@ IfaceMgr::send(boost::shared_ptr<Pkt6>& pkt) {
     return (result);
 }
 
+bool
+IfaceMgr::send(boost::shared_ptr<Pkt4>& )
+{
+    /// TODO: Implement this (ticket #1240)
+    isc_throw(NotImplemented, "Pkt4 send not implemented yet.");
+}
+
+
+boost::shared_ptr<Pkt4>
+IfaceMgr::receive4() {
+    isc_throw(NotImplemented, "Pkt4 reception not implemented yet.");
+
+    // TODO: To be implemented (ticket #1239)
+    return (boost::shared_ptr<Pkt4>()); // NULL
+}
+
 boost::shared_ptr<Pkt6>
-IfaceMgr::receive() {
+IfaceMgr::receive6() {
     struct msghdr m;
     struct iovec v;
     int result;
@@ -442,49 +551,66 @@ IfaceMgr::receive() {
     memset(&from, 0, sizeof(from));
     memset(&to_addr, 0, sizeof(to_addr));
 
-    /*
-     * Initialize our message header structure.
-     */
+    // Initialize our message header structure.
     memset(&m, 0, sizeof(m));
 
-    /*
-     * Point so we can get the from address.
-     */
+    // Point so we can get the from address.
     m.msg_name = &from;
     m.msg_namelen = sizeof(from);
 
-    /*
-     * Set the data buffer we're receiving. (Using this wacky
-     * "scatter-gather" stuff... but we that doesn't really make
-     * sense for us, so we use a single vector entry.)
-     */
+    // Set the data buffer we're receiving. (Using this wacky
+    // "scatter-gather" stuff... but we that doesn't really make
+    // sense for us, so we use a single vector entry.)
     v.iov_base = (void*)&pkt->data_[0];
     v.iov_len = pkt->data_len_;
     m.msg_iov = &v;
     m.msg_iovlen = 1;
 
-    /*
-     * Getting the interface is a bit more involved.
-     *
-     * We set up some space for a "control message". We have
-     * previously asked the kernel to give us packet
-     * information (when we initialized the interface), so we
-     * should get the destination address from that.
-     */
+    // Getting the interface is a bit more involved.
+    //
+    // We set up some space for a "control message". We have
+    // previously asked the kernel to give us packet
+    // information (when we initialized the interface), so we
+    // should get the destination address from that.
     m.msg_control = &control_buf_[0];
     m.msg_controllen = control_buf_len_;
 
-    result = recvmsg(recvsock_, &m, 0);
+    /// TODO: Need to move to select() and poll over
+    /// all available sockets. For now, we just take the
+    /// first interface and use the first socket from it.
+    IfaceCollection::const_iterator iface = ifaces_.begin();
+    if (iface == ifaces_.end()) {
+        isc_throw(Unexpected, "No interfaces detected. Can't receive anything.");
+    }
+    SocketCollection::const_iterator s = iface->sockets_.begin();
+    const SocketInfo* candidate = 0;
+    while (s != iface->sockets_.end()) {
+        if (s->addr_.getAddress().to_v6().is_multicast()) {
+            candidate = &(*s);
+            break;
+        }
+        if (!candidate) {
+            candidate = &(*s); // it's not multicast, but it's better than none
+        }
+        ++s;
+    }
+    if (!candidate) {
+        isc_throw(Unexpected, "Interface " << iface->getFullName()
+                  << " does not have any sockets open.");
+    }
+
+    cout << "Trying to receive over socket " << candidate->sockfd_ << " bound to "
+         << candidate->addr_.toText() << "/port=" << candidate->port_ << " on "
+         << iface->getFullName() << endl;
+    result = recvmsg(candidate->sockfd_, &m, 0);
 
     if (result >= 0) {
-        /*
-         * If we did read successfully, then we need to loop
-         * through the control messages we received and
-         * find the one with our destination address.
-         *
-         * We also keep a flag to see if we found it. If we
-         * didn't, then we consider this to be an error.
-         */
+        // If we did read successfully, then we need to loop
+        // through the control messages we received and
+        // find the one with our destination address.
+        //
+        // We also keep a flag to see if we found it. If we
+        // didn't, then we consider this to be an error.
         int found_pktinfo = 0;
         cmsg = CMSG_FIRSTHDR(&m);
         while (cmsg != NULL) {
@@ -520,7 +646,7 @@ IfaceMgr::receive() {
 
     Iface* received = getIface(pkt->ifindex_);
     if (received) {
-        pkt->iface_ = received->name_;
+        pkt->iface_ = received->getName();
     } else {
         cout << "Received packet over unknown interface (ifindex="
              << pkt->ifindex_ << ")." << endl;
@@ -539,4 +665,60 @@ IfaceMgr::receive() {
     return (pkt);
 }
 
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt6 const& pkt) {
+    Iface* iface = getIface(pkt.iface_);
+    if (!iface) {
+        isc_throw(BadValue, "Tried to find socket for non-existent interface "
+                  << pkt.iface_);
+    }
+
+    SocketCollection::const_iterator s;
+    for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+        if (s->family_ != AF_INET6) {
+            // don't use IPv4 sockets
+            continue;
+        }
+        if (s->addr_.getAddress().to_v6().is_multicast()) {
+            // don't use IPv6 sockets bound to multicast address
+            continue;
+        }
+        /// TODO: Add more checks here later. If remote address is
+        /// not link-local, we can't use link local bound socket
+        /// to send data.
+
+        return (s->sockfd_);
+    }
+
+    isc_throw(Unexpected, "Interface " << iface->getFullName()
+              << " does not have any suitable IPv6 sockets open.");
+}
+
+uint16_t
+IfaceMgr::getSocket(isc::dhcp::Pkt4 const& pkt) {
+    Iface* iface = getIface(pkt.getIface());
+    if (!iface) {
+        isc_throw(BadValue, "Tried to find socket for non-existent interface "
+                  << pkt.getIface());
+    }
+
+    SocketCollection::const_iterator s;
+    for (s = iface->sockets_.begin(); s != iface->sockets_.end(); ++s) {
+        if (s->family_ != AF_INET) {
+            // don't use IPv6 sockets
+            continue;
+        }
+        /// TODO: Add more checks here later. If remote address is
+        /// not link-local, we can't use link local bound socket
+        /// to send data.
+
+        return (s->sockfd_);
+    }
+
+    isc_throw(Unexpected, "Interface " << iface->getFullName()
+              << " does not have any suitable IPv4 sockets open.");
+}
+
+
+
 }
diff --git a/src/bin/dhcp6/iface_mgr.h b/src/bin/dhcp6/iface_mgr.h
index 249c7ef..0aa2592 100644
--- a/src/bin/dhcp6/iface_mgr.h
+++ b/src/bin/dhcp6/iface_mgr.h
@@ -19,8 +19,9 @@
 #include <boost/shared_ptr.hpp>
 #include <boost/scoped_array.hpp>
 #include <boost/noncopyable.hpp>
-#include "asiolink/io_address.h"
-#include "dhcp/pkt6.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt4.h>
+#include <dhcp/pkt6.h>
 
 namespace isc {
 
@@ -34,26 +35,119 @@ namespace dhcp {
 class IfaceMgr : public boost::noncopyable {
 public:
     /// type that defines list of addresses
-    typedef std::list<isc::asiolink::IOAddress> Addr6Lst;
+    typedef std::vector<isc::asiolink::IOAddress> AddressCollection;
 
     /// maximum MAC address length (Infiniband uses 20 bytes)
     static const unsigned int MAX_MAC_LEN = 20;
 
+    /// Holds information about socket.
+    struct SocketInfo {
+        uint16_t sockfd_; ///< socket descriptor
+        isc::asiolink::IOAddress addr_; ///< bound address
+        uint16_t port_;   ///< socket port
+        uint16_t family_; ///< IPv4 or IPv6
+
+        /// @brief SocketInfo constructor.
+        ///
+        /// @param sockfd socket descriptor
+        /// @param addr an address the socket is bound to
+        /// @param port a port the socket is bound to
+        SocketInfo(uint16_t sockfd, const isc::asiolink::IOAddress& addr,
+                   uint16_t port)
+        :sockfd_(sockfd), addr_(addr), port_(port), family_(addr.getFamily()) { }
+    };
+
+    /// type that holds a list of socket information
+    typedef std::list<SocketInfo> SocketCollection;
+
     /// @brief represents a single network interface
     ///
     /// Iface structure represents network interface with all useful
     /// information, like name, interface index, MAC address and
     /// list of assigned addresses
-    struct Iface {
-        /// constructor
+    class Iface {
+    public:
+        /// @brief Iface constructor.
+        ///
+        /// Creates Iface object that represents network interface.
+        ///
+        /// @param name name of the interface
+        /// @param ifindex interface index (unique integer identifier)
         Iface(const std::string& name, int ifindex);
 
-        /// returns full interface name in format ifname/ifindex
+        /// @brief Returns full interface name as "ifname/ifindex" string.
+        ///
+        /// @return string with interface name
         std::string getFullName() const;
 
-        /// returns link-layer address a plain text
+        /// @brief Returns link-layer address as plain text.
+        ///
+        /// @return MAC address as a plain text (string)
         std::string getPlainMac() const;
 
+        /// @brief Returns interface index.
+        ///
+        /// @return interface index
+        uint16_t getIndex() const { return ifindex_; }
+
+        /// @brief Returns interface name.
+        ///
+        /// @return interface name
+        std::string getName() const { return name_; };
+
+        /// @brief Returns all addresses available on an interface.
+        ///
+        /// Care should be taken not to use this collection after the Iface
+        /// object ceases to exist. That is easy in most cases, as Iface
+        /// objects are created by IfaceMgr, which is a singleton and is
+        /// expected to be available at all times. We may revisit this if we
+        /// ever decide to implement dynamic interface detection, but such a
+        /// fancy feature would mostly be useful for clients with
+        /// wifi/vpn/virtual interfaces.
+        ///
+        /// @return collection of addresses
+        const AddressCollection& getAddresses() const { return addrs_; }
+
+        /// @brief Adds an address to an interface.
+        ///
+        /// This only adds an address to the collection; it does not
+        /// physically configure the address on the actual network interface.
+        ///
+        /// @param addr address to be added
+        void addAddress(const isc::asiolink::IOAddress& addr) {
+            addrs_.push_back(addr);
+        }
+
+        /// @brief Deletes an address from an interface.
+        ///
+        /// This only deletes the address from the collection; it does not
+        /// physically remove the address configuration from the actual
+        /// network interface.
+        ///
+        /// @param addr address to be removed.
+        ///
+        /// @return true if removal was successful (address was in collection),
+        ///         false otherwise
+        bool delAddress(const isc::asiolink::IOAddress& addr);
+
+        /// @brief Adds socket descriptor to an interface.
+        ///
+        /// @param sock SocketInfo structure that describes the socket.
+        void addSocket(const SocketInfo& sock)
+            { sockets_.push_back(sock); }
+
+        /// @brief Closes a socket.
+        ///
+        /// Closes the socket and removes the corresponding SocketInfo
+        /// structure from the interface.
+        ///
+        /// @param sockfd socket descriptor to be closed/removed.
+        /// @return true if there was such a socket, false otherwise
+        bool delSocket(uint16_t sockfd);
+
+        /// collection of sockets open on this interface
+        /// TODO: this should be protected
+        SocketCollection sockets_;
+
+    protected:
         /// network interface name
         std::string name_;
 
@@ -61,19 +155,13 @@ public:
         int ifindex_;
 
         /// list of assigned addresses
-        Addr6Lst addrs_;
+        AddressCollection addrs_;
 
         /// link-layer address
         uint8_t mac_[MAX_MAC_LEN];
 
         /// length of link-layer address (usually 6)
         int mac_len_;
-
-        /// socket used to sending data
-        int sendsock_;
-
-        /// socket used for receiving data
-        int recvsock_;
     };
 
     // TODO performance improvement: we may change this into
@@ -81,7 +169,7 @@ public:
     //      also hide it (make it public make tests easier for now)
 
     /// type that holds a list of interfaces
-    typedef std::list<Iface> IfaceLst;
+    typedef std::list<Iface> IfaceCollection;
 
     /// IfaceMgr is a singleton class. This method returns reference
     /// to its sole instance.
@@ -109,27 +197,63 @@ public:
     Iface*
     getIface(const std::string& ifname);
 
+    /// @brief Return most suitable socket for transmitting specified IPv6 packet.
+    ///
+    /// This method takes Pkt6 (see overloaded implementation that takes
+    /// Pkt4) and chooses appropriate socket to send it. This method
+    /// may throw BadValue if specified packet does not have outbound
+    /// interface specified, no such interface exists, or specified
+    /// interface does not have any appropriate sockets open.
+    ///
+    /// @param pkt a packet to be transmitted
+    ///
+    /// @return a socket descriptor
+    uint16_t getSocket(const isc::dhcp::Pkt6& pkt);
+
+    /// @brief Return most suitable socket for transmitting specified IPv4 packet.
+    ///
+    /// This method takes Pkt4 (see overloaded implementation that takes
+    /// Pkt6) and chooses appropriate socket to send it. This method
+    /// may throw BadValue if specified packet does not have outbound
+    /// interface specified, no such interface exists, or specified
+    /// interface does not have any appropriate sockets open.
+    ///
+    /// @param pkt a packet to be transmitted
+    ///
+    /// @return a socket descriptor
+    uint16_t getSocket(const isc::dhcp::Pkt4& pkt);
+
     /// debugging method that prints out all available interfaces
     ///
     /// @param out specifies stream to print list of interfaces to
     void
     printIfaces(std::ostream& out = std::cout);
 
-    /// @brief Sends a packet.
+    /// @brief Sends an IPv6 packet.
     ///
-    /// Sends a packet. All parameters for actual transmission are specified in
+    /// Sends an IPv6 packet. All parameters for actual transmission are specified in
     /// Pkt6 structure itself. That includes destination address, src/dst port
     /// and interface over which data will be sent.
     ///
     /// @param pkt packet to be sent
     ///
     /// @return true if sending was successful
-    bool
-    send(boost::shared_ptr<Pkt6>& pkt);
+    bool send(boost::shared_ptr<Pkt6>& pkt);
 
-    /// @brief Tries to receive packet over open sockets.
+    /// @brief Sends an IPv4 packet.
     ///
-    /// Attempts to receive a single packet of any of the open sockets.
+    /// Sends an IPv4 packet. All parameters for actual transmission are specified
+    /// in Pkt4 structure itself. That includes destination address, src/dst
+    /// port and interface over which data will be sent.
+    ///
+    /// @param pkt a packet to be sent
+    ///
+    /// @return true if sending was successful
+    bool send(boost::shared_ptr<Pkt4>& pkt);
+
+    /// @brief Tries to receive IPv6 packet over open IPv6 sockets.
+    ///
+    /// Attempts to receive a single IPv6 packet on any of the open IPv6 sockets.
     /// If reception is successful and all information about its sender
     /// are obtained, Pkt6 object is created and returned.
     ///
@@ -138,7 +262,49 @@ public:
     /// (e.g. remove expired leases)
     ///
     /// @return Pkt6 object representing received packet (or NULL)
-    boost::shared_ptr<Pkt6> receive();
+    boost::shared_ptr<Pkt6> receive6();
+
+    /// @brief Tries to receive IPv4 packet over open IPv4 sockets.
+    ///
+    /// Attempts to receive a single IPv4 packet on any of the open IPv4 sockets.
+    /// If reception is successful and all information about its sender
+    /// is obtained, a Pkt4 object is created and returned.
+    ///
+    /// TODO Start using select() and add timeout to be able
+    /// to not wait infinitely, but rather do something useful
+    /// (e.g. remove expired leases)
+    ///
+    /// @return Pkt4 object representing received packet (or NULL)
+    boost::shared_ptr<Pkt4> receive4();
+
+    /// Opens UDP/IP socket and binds it to address, interface and port.
+    ///
+    /// The specific type of socket (UDP/IPv4 or UDP/IPv6) depends on the
+    /// family of the passed address.
+    ///
+    /// @param ifname name of the interface
+    /// @param addr address to be bound.
+    /// @param port UDP port.
+    ///
+    /// Method will throw if socket creation, socket binding or multicast
+    /// join fails.
+    ///
+    /// @return socket descriptor, if socket creation, binding and multicast
+    /// group join were all successful.
+    int openSocket(const std::string& ifname,
+                   const isc::asiolink::IOAddress& addr, int port);
+
+    /// Opens IPv6 sockets on detected interfaces.
+    ///
+    /// Will throw an exception if socket creation fails.
+    ///
+    /// @param port specifies port number (usually DHCP6_SERVER_PORT)
+    void openSockets(uint16_t port);
+
+
+    /// @brief Closes all open sockets.
+    /// Used in the destructor, but also by the Dhcpv4_srv and Dhcpv6_srv classes.
+    void closeSockets();
 
     // don't use private, we need derived classes in tests
 protected:
@@ -146,11 +312,44 @@ protected:
     /// @brief Protected constructor.
     ///
     /// Protected constructor. This is a singleton class. We don't want
-    /// anyone to create instances of IfaceMgr. Use instance() method
+    /// anyone to create instances of IfaceMgr. Use instance() method instead.
     IfaceMgr();
 
     ~IfaceMgr();
 
+    /// @brief Opens IPv4 socket.
+    ///
+    /// Please do not use this method directly. Use openSocket instead.
+    ///
+    /// This method may throw an exception if socket creation fails.
+    ///
+    /// @param iface reference to interface structure.
+    /// @param addr an address the created socket should be bound to
+    /// @param port a port that created socket should be bound to
+    ///
+    /// @return socket descriptor
+    int openSocket4(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+    /// @brief Opens IPv6 socket.
+    ///
+    /// Please do not use this method directly. Use openSocket instead.
+    ///
+    /// This method may throw an exception if socket creation fails.
+    ///
+    /// @param iface reference to interface structure.
+    /// @param addr an address the created socket should be bound to
+    /// @param port a port that created socket should be bound to
+    ///
+    /// @return socket descriptor
+    int openSocket6(Iface& iface, const isc::asiolink::IOAddress& addr, int port);
+
+    /// @brief Adds an interface to list of known interfaces.
+    ///
+    /// @param iface reference to Iface object.
+    void addInterface(const Iface& iface) {
+        ifaces_.push_back(iface);
+    }
+
     /// @brief Detects network interfaces.
     ///
     /// This method will eventually detect available interfaces. For now
@@ -159,24 +358,11 @@ protected:
     void
     detectIfaces();
 
-    ///
-    /// Opens UDP/IPv6 socket and binds it to address, interface and port.
-    ///
-    /// @param ifname name of the interface
-    /// @param addr address to be bound.
-    /// @param port UDP port.
-    ///
-    /// @return socket descriptor, if socket creation, binding and multicast
-    /// group join were all successful. -1 otherwise.
-    int openSocket(const std::string& ifname,
-                   const isc::asiolink::IOAddress& addr,
-                   int port);
-
     // TODO: having 2 maps (ifindex->iface and ifname->iface would)
     //      probably be better for performance reasons
 
     /// List of available interfaces
-    IfaceLst ifaces_;
+    IfaceCollection ifaces_;
 
     /// a pointer to a sole instance of this class (a singleton)
     static IfaceMgr * instance_;
@@ -184,8 +370,9 @@ protected:
     // TODO: Also keep this interface on Iface once interface detection
     // is implemented. We may need it e.g. to close all sockets on
     // specific interface
-    int recvsock_; // TODO: should be fd_set eventually, but we have only
-    int sendsock_; // 2 sockets for now. Will do for until next release
+    //int recvsock_; // TODO: should be fd_set eventually, but we have only
+    //int sendsock_; // 2 sockets for now. Will do for until next release
+
     // we can't use the same socket, as receiving socket
     // is bound to multicast address. And we all know what happens
     // to people who try to use multicast as source address.
@@ -197,9 +384,6 @@ protected:
     boost::scoped_array<char> control_buf_;
 
 private:
-    /// Opens sockets on detected interfaces.
-    bool
-    openSockets();
 
     /// creates a single instance of this class (a singleton implementation)
     static void
@@ -221,6 +405,7 @@ private:
     bool
     joinMcast(int sock, const std::string& ifname,
               const std::string& mcast);
+
 };
 
 }; // namespace isc::dhcp
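
To make the reworked IfaceMgr interface above easier to follow, here is a minimal
usage sketch. It is an illustration only, built from the declarations and the unit
tests in this change set: it assumes openSocket() throws on failure instead of
returning -1, that receive() has been split into receive6()/receive4(), and that
getSocket() selects a descriptor per packet. The interface name "lo" and port 10547
are placeholders borrowed from the tests, not part of the header.

    // Illustration only (not part of this change set): a minimal IPv6
    // receive/echo cycle on top of the IfaceMgr declarations above.
    #include <dhcp/pkt6.h>
    #include <dhcp6/iface_mgr.h>
    #include <asiolink/io_address.h>
    #include <boost/shared_ptr.hpp>
    #include <stdint.h>

    using namespace isc::dhcp;
    using isc::asiolink::IOAddress;

    void echoOnce() {
        IfaceMgr& mgr = IfaceMgr::instance();

        // Bind a UDP/IPv6 socket on the loopback interface. openSocket()
        // is expected to throw on socket/bind/multicast-join failure.
        mgr.openSocket("lo", IOAddress("::1"), 10547);

        // Block until a packet arrives on any open IPv6 socket.
        boost::shared_ptr<Pkt6> pkt = mgr.receive6();
        if (pkt) {
            // getSocket() picks the most suitable descriptor for the
            // packet's outbound interface (and throws if none is usable);
            // send() relies on the fields carried by the Pkt6 itself.
            const uint16_t fd = mgr.getSocket(*pkt);
            static_cast<void>(fd);
            mgr.send(pkt);
        }

        // Close every socket opened by IfaceMgr.
        mgr.closeSockets();
    }
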
diff --git a/src/bin/dhcp6/tests/Makefile.am b/src/bin/dhcp6/tests/Makefile.am
index 985368e..f37194c 100644
--- a/src/bin/dhcp6/tests/Makefile.am
+++ b/src/bin/dhcp6/tests/Makefile.am
@@ -25,8 +25,6 @@ check-local:
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_builddir)/src/bin # for generated spec_config.h header
 AM_CPPFLAGS += -I$(top_srcdir)/src/bin
-AM_CPPFLAGS += -I$(top_builddir)/src/lib/cc
-AM_CPPFLAGS += -I$(top_srcdir)/src/lib/asiolink
 AM_CPPFLAGS += $(BOOST_INCLUDES)
 AM_CPPFLAGS += -DTEST_DATA_DIR=\"$(abs_top_srcdir)/src/lib/testutils/testdata\"
 AM_CPPFLAGS += -DTEST_DATA_BUILDDIR=\"$(abs_top_builddir)/src/bin/dhcp6/tests\"
@@ -57,8 +55,8 @@ dhcp6_unittests_LDADD = $(GTEST_LDADD)
 dhcp6_unittests_LDADD += $(SQLITE_LIBS)
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/asiolink/libasiolink.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/dhcp/libdhcp.la
-dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 dhcp6_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+dhcp6_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 endif
 
 noinst_PROGRAMS = $(TESTS)
diff --git a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
index 72e48e4..50f37af 100644
--- a/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
+++ b/src/bin/dhcp6/tests/dhcp6_srv_unittest.cc
@@ -34,7 +34,7 @@ namespace test {
 class NakedDhcpv6Srv: public Dhcpv6Srv {
     // "naked" Interface Manager, exposes internal fields
 public:
-    NakedDhcpv6Srv() { }
+    NakedDhcpv6Srv():Dhcpv6Srv(DHCP6_SERVER_PORT + 10000) { }
 
     boost::shared_ptr<Pkt6>
     processSolicit(boost::shared_ptr<Pkt6>& request) {
@@ -53,30 +53,27 @@ public:
 };
 
 TEST_F(Dhcpv6SrvTest, basic) {
-    // there's almost no code now. What's there provides echo capability
-    // that is just a proof of concept and will be removed soon
-    // No need to thoroughly test it
-
     // srv has stubbed interface detection. It will read
     // interfaces.txt instead. It will pretend to have detected
     // fe80::1234 link-local address on eth0 interface. Obviously
     // an attempt to bind this socket will fail.
-    EXPECT_NO_THROW( {
-        Dhcpv6Srv * srv = new Dhcpv6Srv();
-
-        delete srv;
-        });
+    Dhcpv6Srv* srv = 0;
+    ASSERT_NO_THROW( {
+        // open an unprivileged port
+        srv = new Dhcpv6Srv(DHCP6_SERVER_PORT + 10000);
+    });
 
+    delete srv;
 }
 
 TEST_F(Dhcpv6SrvTest, Solicit_basic) {
     NakedDhcpv6Srv * srv = 0;
-    EXPECT_NO_THROW( srv = new NakedDhcpv6Srv(); );
+    ASSERT_NO_THROW( srv = new NakedDhcpv6Srv(); );
 
     // a dummy content for client-id
     boost::shared_array<uint8_t> clntDuid(new uint8_t[32]);
-    for (int i=0; i<32; i++)
-        clntDuid[i] = 100+i;
+    for (int i = 0; i < 32; i++)
+        clntDuid[i] = 100 + i;
 
     boost::shared_ptr<Pkt6> sol =
         boost::shared_ptr<Pkt6>(new Pkt6(DHCPV6_SOLICIT,
diff --git a/src/bin/dhcp6/tests/iface_mgr_unittest.cc b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
index f126e6a..3cc2ae8 100644
--- a/src/bin/dhcp6/tests/iface_mgr_unittest.cc
+++ b/src/bin/dhcp6/tests/iface_mgr_unittest.cc
@@ -20,9 +20,10 @@
 #include <arpa/inet.h>
 #include <gtest/gtest.h>
 
-#include "io_address.h"
-#include "dhcp/pkt6.h"
-#include "dhcp6/iface_mgr.h"
+#include <asiolink/io_address.h>
+#include <dhcp/pkt6.h>
+#include <dhcp6/iface_mgr.h>
+#include <dhcp/dhcp4.h>
 
 using namespace std;
 using namespace isc;
@@ -39,16 +40,7 @@ class NakedIfaceMgr: public IfaceMgr {
     // "naked" Interface Manager, exposes internal fields
 public:
     NakedIfaceMgr() { }
-    IfaceLst & getIfacesLst() { return ifaces_; }
-    void setSendSock(int sock) { sendsock_ = sock; }
-    void setRecvSock(int sock) { recvsock_ = sock; }
-
-    int openSocket(const std::string& ifname,
-                   const isc::asiolink::IOAddress& addr,
-                   int port) {
-        return IfaceMgr::openSocket(ifname, addr, port);
-    }
-
+    IfaceCollection & getIfacesLst() { return ifaces_; }
 };
 
 // dummy class for now, but this will be expanded when needed
@@ -56,6 +48,13 @@ class IfaceMgrTest : public ::testing::Test {
 public:
     IfaceMgrTest() {
     }
+
+    void createLoInterfacesTxt() {
+        unlink(INTERFACE_FILE);
+        fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
+        fakeifaces << LOOPBACK << " ::1";
+        fakeifaces.close();
+    }
 };
 
 // We need some known interface to work reliably. Loopback interface
@@ -109,6 +108,7 @@ TEST_F(IfaceMgrTest, dhcp6Sniffer) {
     while (true) {
         pkt = ifacemgr->receive();
 
+        cout << "// this code is autogenerated. Do NOT edit." << endl;
         cout << "// Received " << pkt->data_len_ << " bytes packet:" << endl;
         cout << "Pkt6 *capture" << cnt++ << "() {" << endl;
         cout << "    Pkt6* pkt;" << endl;
@@ -183,10 +183,10 @@ TEST_F(IfaceMgrTest, getIface) {
 
     cout << "There are " << ifacemgr->getIfacesLst().size()
          << " interfaces." << endl;
-    for (IfaceMgr::IfaceLst::iterator iface=ifacemgr->getIfacesLst().begin();
+    for (IfaceMgr::IfaceCollection::iterator iface=ifacemgr->getIfacesLst().begin();
          iface != ifacemgr->getIfacesLst().end();
          ++iface) {
-        cout << "  " << iface->name_ << "/" << iface->ifindex_ << endl;
+        cout << "  " << iface->getFullName() << endl;
     }
 
 
@@ -195,15 +195,15 @@ TEST_F(IfaceMgrTest, getIface) {
     // ASSERT_NE(NULL, tmp); is not supported. hmmmm.
     ASSERT_TRUE( tmp != NULL );
 
-    EXPECT_STREQ( "en3", tmp->name_.c_str() );
-    EXPECT_EQ(5, tmp->ifindex_);
+    EXPECT_EQ( "en3", tmp->getName() );
+    EXPECT_EQ(5, tmp->getIndex());
 
     // check that interface can be retrieved by name
     tmp = ifacemgr->getIface("lo1");
     ASSERT_TRUE( tmp != NULL );
 
-    EXPECT_STREQ( "lo1", tmp->name_.c_str() );
-    EXPECT_EQ(1, tmp->ifindex_);
+    EXPECT_EQ( "lo1", tmp->getName() );
+    EXPECT_EQ(1, tmp->getIndex());
 
     // check that non-existing interfaces are not returned
     EXPECT_EQ(static_cast<void*>(NULL), ifacemgr->getIface("wifi0") );
@@ -231,58 +231,51 @@ TEST_F(IfaceMgrTest, detectIfaces) {
     IfaceMgr::Iface * eth0 = ifacemgr->getIface("eth0");
 
     // there should be one address
-    EXPECT_EQ(1, eth0->addrs_.size());
+    IfaceMgr::AddressCollection addrs = eth0->getAddresses();
+    ASSERT_EQ(1, addrs.size());
 
-    IOAddress * addr = &(*eth0->addrs_.begin());
-    ASSERT_TRUE( addr != NULL );
+    IOAddress addr = *addrs.begin();
 
-    EXPECT_STREQ( "fe80::1234", addr->toText().c_str() );
+    EXPECT_STREQ( "fe80::1234", addr.toText().c_str() );
 
     delete ifacemgr;
 }
 
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sockets) {
+TEST_F(IfaceMgrTest, sockets6) {
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
 
+    createLoInterfacesTxt();
+
     NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
 
     IOAddress loAddr("::1");
 
+    Pkt6 pkt6(128);
+    pkt6.iface_ = LOOPBACK;
+
     // bind multicast socket to port 10547
     int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
     EXPECT_GT(socket1, 0); // socket > 0
 
+    EXPECT_EQ(socket1, ifacemgr->getSocket(pkt6));
+
     // bind unicast socket to port 10548
     int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10548);
     EXPECT_GT(socket2, 0);
 
-    // expect success. This address/port is already bound, but
-    // we are using SO_REUSEADDR, so we can bind it twice
-    int socket3 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-
-    // rebinding succeeds on Linux, fails on BSD
-    // TODO: add OS-specific defines here (or modify code to
-    // behave the same way on all OSes, but that may not be
-    // possible
-    // EXPECT_GT(socket3, 0); // socket > 0
-
-    // we now have 3 sockets open at the same time. Looks good.
+    // removed code for binding socket twice to the same address/port
+    // as it caused problems on some platforms (e.g. Mac OS X)
 
     close(socket1);
     close(socket2);
-    close(socket3);
 
     delete ifacemgr;
 }
 
 // TODO: disabled due to other naming on various systems
 // (lo in Linux, lo0 in BSD systems)
-TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
+TEST_F(IfaceMgrTest, DISABLED_sockets6Mcast) {
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
 
@@ -311,27 +304,21 @@ TEST_F(IfaceMgrTest, DISABLED_socketsMcast) {
     delete ifacemgr;
 }
 
-// TODO: disabled due to other naming on various systems
-// (lo in Linux, lo0 in BSD systems)
-// Fix for this is available on 1186 branch, will reenable
-// this test once 1186 is merged
-TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
+TEST_F(IfaceMgrTest, sendReceive6) {
+
     // testing socket operation in a portable way is tricky
     // without interface detection implemented
+    createLoInterfacesTxt();
 
-    fstream fakeifaces(INTERFACE_FILE, ios::out|ios::trunc);
-    fakeifaces << LOOPBACK << " ::1";
-    fakeifaces.close();
-
-    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+    NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
 
     // let's assume that every supported OS have lo interface
     IOAddress loAddr("::1");
-    int socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
-    int socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
-
-    ifacemgr->setSendSock(socket2);
-    ifacemgr->setRecvSock(socket1);
+    int socket1 = 0, socket2 = 0;
+    EXPECT_NO_THROW(
+        socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, 10547);
+        socket2 = ifacemgr->openSocket(LOOPBACK, loAddr, 10546);
+    );
 
     boost::shared_ptr<Pkt6> sendPkt(new Pkt6(128) );
 
@@ -349,7 +336,7 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
 
     EXPECT_EQ(true, ifacemgr->send(sendPkt));
 
-    rcvPkt = ifacemgr->receive();
+    rcvPkt = ifacemgr->receive6();
 
     ASSERT_TRUE( rcvPkt ); // received our own packet
 
@@ -359,7 +346,168 @@ TEST_F(IfaceMgrTest, DISABLED_sendReceive) {
                         rcvPkt->data_len_) );
 
     EXPECT_EQ(sendPkt->remote_addr_.toText(), rcvPkt->remote_addr_.toText());
-    EXPECT_EQ(rcvPkt->remote_port_, 10546);
+
+    // Since we opened 2 sockets on the same interface and neither of them is
+    // multicast, neither is preferred over the other for sending data, so we
+    // really should not assume that one or the other will always be chosen.
+    // Therefore we should accept both values as valid source ports.
+    EXPECT_TRUE( (rcvPkt->remote_port_ == 10546) || (rcvPkt->remote_port_ == 10547) );
+
+    delete ifacemgr;
+}
+
+TEST_F(IfaceMgrTest, socket4) {
+
+    createLoInterfacesTxt();
+    NakedIfaceMgr* ifacemgr = new NakedIfaceMgr();
+
+    // Let's assume that every supported OS has a lo interface.
+    IOAddress loAddr("127.0.0.1");
+    // Use unprivileged port (it's convenient for running tests as non-root).
+    int socket1 = 0;
+
+    EXPECT_NO_THROW(
+        socket1 = ifacemgr->openSocket(LOOPBACK, loAddr, DHCP4_SERVER_PORT + 10000);
+    );
+
+    EXPECT_GT(socket1, 0);
+
+    Pkt4 pkt(DHCPDISCOVER, 1234);
+    pkt.setIface(LOOPBACK);
+
+    // Expect that we get the socket that we just opened.
+    EXPECT_EQ(socket1, ifacemgr->getSocket(pkt));
+
+    close(socket1);
+
+    delete ifacemgr;
+}
+
+// Test the Iface structure itself
+TEST_F(IfaceMgrTest, iface) {
+    IfaceMgr::Iface* iface = 0;
+    EXPECT_NO_THROW(
+        iface = new IfaceMgr::Iface("eth0",1);
+    );
+
+    EXPECT_EQ("eth0", iface->getName());
+    EXPECT_EQ(1, iface->getIndex());
+    EXPECT_EQ("eth0/1", iface->getFullName());
+
+    // Let's make a copy of this address collection.
+    IfaceMgr::AddressCollection addrs = iface->getAddresses();
+
+    EXPECT_EQ(0, addrs.size());
+
+    IOAddress addr1("192.0.2.6");
+    iface->addAddress(addr1);
+
+    addrs = iface->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+    EXPECT_EQ("192.0.2.6", addrs.at(0).toText());
+
+    // No such address, should return false.
+    EXPECT_FALSE(iface->delAddress(IOAddress("192.0.8.9")));
+
+    // This address is present, delete it!
+    EXPECT_TRUE(iface->delAddress(IOAddress("192.0.2.6")));
+
+    // Note that 'addrs' above is a copy made before the deletion, so we need
+    // to fetch the collection again to observe the change. Test code may
+    // serve as example usage code as well.
+    addrs = iface->getAddresses();
+
+    EXPECT_EQ(0, addrs.size());
+
+    EXPECT_NO_THROW(
+        delete iface;
+    );
+}
+
+TEST_F(IfaceMgrTest, socketInfo) {
+
+    // check that socketinfo for IPv4 socket is functional
+    IfaceMgr::SocketInfo sock1(7, IOAddress("192.0.2.56"), DHCP4_SERVER_PORT + 7);
+    EXPECT_EQ(7, sock1.sockfd_);
+    EXPECT_EQ("192.0.2.56", sock1.addr_.toText());
+    EXPECT_EQ(AF_INET, sock1.family_);
+    EXPECT_EQ(DHCP4_SERVER_PORT + 7, sock1.port_);
+
+    // check that socketinfo for IPv6 socket is functional
+    IfaceMgr::SocketInfo sock2(9, IOAddress("2001:db8:1::56"), DHCP4_SERVER_PORT + 9);
+    EXPECT_EQ(9, sock2.sockfd_);
+    EXPECT_EQ("2001:db8:1::56", sock2.addr_.toText());
+    EXPECT_EQ(AF_INET6, sock2.family_);
+    EXPECT_EQ(DHCP4_SERVER_PORT + 9, sock2.port_);
+
+    // now let's test if IfaceMgr handles socket info properly
+    createLoInterfacesTxt();
+    NakedIfaceMgr * ifacemgr = new NakedIfaceMgr();
+    IfaceMgr::Iface* loopback = ifacemgr->getIface(LOOPBACK);
+    ASSERT_TRUE(loopback);
+    loopback->addSocket(sock1);
+    loopback->addSocket(sock2);
+
+    Pkt6 pkt6(100);
+
+    // pkt6 does not have interface set yet
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        BadValue
+    );
+
+    // try to send over non-existing interface
+    pkt6.iface_ = "nosuchinterface45";
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        BadValue
+    );
+
+    // this will work
+    pkt6.iface_ = LOOPBACK;
+    EXPECT_EQ(9, ifacemgr->getSocket(pkt6));
+
+    bool deleted = false;
+    EXPECT_NO_THROW(
+        deleted = ifacemgr->getIface(LOOPBACK)->delSocket(9);
+    );
+    EXPECT_EQ(true, deleted);
+
+    // it should throw again, there's no usable socket anymore
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt6),
+        Unexpected
+    );
+
+    // repeat for pkt4
+    Pkt4 pkt4(DHCPDISCOVER, 1);
+
+    // pkt4 does not have interface set yet.
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        BadValue
+    );
+
+    // Try to send over non-existing interface.
+    pkt4.setIface("nosuchinterface45");
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        BadValue
+    );
+
+    // Socket info is set, packet has well defined interface. It should work.
+    pkt4.setIface(LOOPBACK);
+    EXPECT_EQ(7, ifacemgr->getSocket(pkt4));
+
+    EXPECT_NO_THROW(
+        ifacemgr->getIface(LOOPBACK)->delSocket(7);
+    );
+
+    // It should throw again, there's no usable socket anymore.
+    EXPECT_THROW(
+        ifacemgr->getSocket(pkt4),
+        Unexpected
+    );
 
     delete ifacemgr;
 }
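
The socketInfo test above is the clearest summary of the new per-interface
bookkeeping: each Iface keeps SocketInfo records (descriptor, bound address,
family, port), and IfaceMgr::getSocket() consults that collection for the
packet's outbound interface. A condensed sketch of the same flow outside gtest
follows; the SocketInfo constructor arguments and the exception behaviour
(BadValue for a missing or unknown interface, Unexpected when no usable socket
is left) are taken from the test expectations above, not from a finished
implementation, and "lo" is again a placeholder interface name.

    // Sketch only: per-interface socket bookkeeping as exercised by the
    // socketInfo test above.
    #include <dhcp/pkt6.h>
    #include <dhcp6/iface_mgr.h>
    #include <asiolink/io_address.h>
    #include <exceptions/exceptions.h>
    #include <iostream>

    using namespace isc::dhcp;
    using isc::asiolink::IOAddress;

    void showSocketLookup() {
        IfaceMgr& mgr = IfaceMgr::instance();
        IfaceMgr::Iface* iface = mgr.getIface("lo"); // placeholder name
        if (iface == NULL) {
            return;
        }

        // Register an already-open descriptor (9) with the interface;
        // the address family is deduced from the bound address.
        iface->addSocket(IfaceMgr::SocketInfo(9, IOAddress("::1"), 10547));

        Pkt6 pkt(128);
        pkt.iface_ = "lo";

        try {
            // Expected to return 9 while the record above is registered.
            std::cout << "socket " << mgr.getSocket(pkt) << std::endl;
        } catch (const isc::Exception& ex) {
            // BadValue: no or unknown outbound interface.
            // Unexpected: interface known, but no usable socket left.
            std::cout << "no usable socket: " << ex.what() << std::endl;
        }

        // Remove the record; a further getSocket() would throw Unexpected.
        iface->delSocket(9);
    }
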
diff --git a/src/bin/resolver/tests/Makefile.am b/src/bin/resolver/tests/Makefile.am
index 12ddab3..4d407bb 100644
--- a/src/bin/resolver/tests/Makefile.am
+++ b/src/bin/resolver/tests/Makefile.am
@@ -45,9 +45,9 @@ run_unittests_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
 run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
 run_unittests_LDADD += $(top_builddir)/src/lib/server_common/libserver_common.la
+run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cache/libcache.la
 run_unittests_LDADD += $(top_builddir)/src/lib/nsas/libnsas.la
-run_unittests_LDADD += $(top_builddir)/src/lib/resolve/libresolve.la
 run_unittests_LDADD += $(top_builddir)/src/lib/acl/libacl.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index cffafe1..cba98ae 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -27,5 +27,6 @@ endif
 	PYTHONPATH=$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/bin/xfrin:$(COMMON_PYTHON_PATH) \
 	TESTDATASRCDIR=$(abs_top_srcdir)/src/bin/xfrin/tests/testdata/ \
 	TESTDATAOBJDIR=$(abs_top_builddir)/src/bin/xfrin/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrin/tests/xfrin_test.py b/src/bin/xfrin/tests/xfrin_test.py
index 3c41110..eb2c747 100644
--- a/src/bin/xfrin/tests/xfrin_test.py
+++ b/src/bin/xfrin/tests/xfrin_test.py
@@ -17,14 +17,21 @@ import unittest
 import re
 import shutil
 import socket
-import sqlite3
 import sys
 import io
 from isc.testutils.tsigctx_mock import MockTSIGContext
+from isc.testutils.rrset_utils import *
 from xfrin import *
 import xfrin
 from isc.xfrin.diff import Diff
 import isc.log
+# If we use any python library that is basically a wrapper for
+# a library we use as well (like sqlite3 in our datasources),
+# we must make sure we import ours first; if we have special
+# rpath or libtool rules to pick the correct version, python might
+# choose the wrong one first if those rules aren't hit first.
+# This would result in missing symbols later.
+import sqlite3
 
 #
 # Commonly used (mostly constant) test parameters
@@ -36,11 +43,9 @@ TEST_RRCLASS_STR = 'IN'
 TEST_DB_FILE = 'db_file'
 TEST_MASTER_IPV4_ADDRESS = '127.0.0.1'
 TEST_MASTER_IPV4_ADDRINFO = (socket.AF_INET, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV4_ADDRESS, 53))
 TEST_MASTER_IPV6_ADDRESS = '::1'
 TEST_MASTER_IPV6_ADDRINFO = (socket.AF_INET6, socket.SOCK_STREAM,
-                             socket.IPPROTO_TCP, '',
                              (TEST_MASTER_IPV6_ADDRESS, 53))
 
 TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
@@ -153,7 +158,7 @@ class MockDataSourceClient():
             return (DataSourceClient.PARTIALMATCH, self)
         raise ValueError('Unexpected input to mock client: bug in test case?')
 
-    def find(self, name, rrtype, target, options):
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
         '''Mock ZoneFinder.find().
 
         It returns the predefined SOA RRset to queries for SOA of the common
@@ -224,7 +229,7 @@ class MockXfrinConnection(XfrinConnection):
     def __init__(self, sock_map, zone_name, rrclass, datasrc_client,
                  shutdown_event, master_addr, tsig_key=None):
         super().__init__(sock_map, zone_name, rrclass, MockDataSourceClient(),
-                         shutdown_event, master_addr)
+                         shutdown_event, master_addr, TEST_DB_FILE)
         self.query_data = b''
         self.reply_data = b''
         self.force_time_out = False
@@ -274,10 +279,11 @@ class MockXfrinConnection(XfrinConnection):
                 self.response_generator()
         return len(data)
 
-    def create_response_data(self, response=True, bad_qid=False,
+    def create_response_data(self, response=True, auth=True, bad_qid=False,
                              rcode=Rcode.NOERROR(),
                              questions=default_questions,
                              answers=default_answers,
+                             authorities=[],
                              tsig_ctx=None):
         resp = Message(Message.RENDER)
         qid = self.qid
@@ -288,8 +294,11 @@ class MockXfrinConnection(XfrinConnection):
         resp.set_rcode(rcode)
         if response:
             resp.set_header_flag(Message.HEADERFLAG_QR)
+        if auth:
+            resp.set_header_flag(Message.HEADERFLAG_AA)
         [resp.add_question(q) for q in questions]
         [resp.add_rrset(Message.SECTION_ANSWER, a) for a in answers]
+        [resp.add_rrset(Message.SECTION_AUTHORITY, a) for a in authorities]
 
         renderer = MessageRenderer()
         if tsig_ctx is not None:
@@ -342,13 +351,44 @@ class TestXfrinInitialSOA(TestXfrinState):
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinFirstData()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.conn._end_serial)
+        self.assertEqual(1234, self.conn._end_serial.get_value())
 
     def test_handle_not_soa(self):
         # The given RR is not of SOA
         self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
                           self.ns_rrset)
 
+    def test_handle_ixfr_uptodate(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate2(self):
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(1235) # > soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinIXFRUptodate()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_ixfr_uptodate3(self):
+        # Similar to the previous case, but checking serial number arithmetic
+        # comparison
+        self.conn._request_type = RRType.IXFR()
+        self.conn._request_serial = isc.dns.Serial(0xffffffff)
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
+    def test_handle_axfr_uptodate(self):
+        # "request serial" should matter only for IXFR
+        self.conn._request_type = RRType.AXFR()
+        self.conn._request_serial = isc.dns.Serial(1234) # same as soa_rrset
+        self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
+        self.assertEqual(type(XfrinFirstData()),
+                         type(self.conn.get_xfrstate()))
+
     def test_finish_message(self):
         self.assertTrue(self.state.finish_message(self.conn))
 
@@ -357,7 +397,8 @@ class TestXfrinFirstData(TestXfrinState):
         super().setUp()
         self.state = XfrinFirstData()
         self.conn._request_type = RRType.IXFR()
-        self.conn._request_serial = 1230 # arbitrary chosen serial < 1234
+        # arbitrary chosen serial < 1234:
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._diff = None           # should be replaced in the AXFR case
 
     def test_handle_ixfr_begin_soa(self):
@@ -437,7 +478,7 @@ class TestXfrinIXFRDelete(TestXfrinState):
         # false.
         self.assertFalse(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual([], self.conn._diff.get_buffer())
-        self.assertEqual(1234, self.conn._current_serial)
+        self.assertEqual(1234, self.conn._current_serial.get_value())
         self.assertEqual(type(XfrinIXFRAddSOA()),
                          type(self.conn.get_xfrstate()))
 
@@ -468,7 +509,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         # We need record the state in 'conn' to check the case where the
         # state doesn't change.
         XfrinIXFRAdd().set_xfrstate(self.conn, XfrinIXFRAdd())
-        self.conn._current_serial = 1230
+        self.conn._current_serial = isc.dns.Serial(1230)
         self.state = self.conn.get_xfrstate()
 
     def test_handle_add_rr(self):
@@ -480,7 +521,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual(type(XfrinIXFRAdd()), type(self.conn.get_xfrstate()))
 
     def test_handle_end_soa(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
         self.assertTrue(self.state.handle_rr(self.conn, soa_rrset))
         self.assertEqual(type(XfrinIXFREnd()), type(self.conn.get_xfrstate()))
@@ -489,7 +530,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
         self.assertEqual([], self.conn._diff.get_buffer())
 
     def test_handle_new_delete(self):
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
         # SOA RR whose serial is the current one means we are going to a new
         # difference, starting with removing that SOA.
         self.conn._diff.add_data(self.ns_rrset) # put some dummy change
@@ -500,7 +541,7 @@ class TestXfrinIXFRAdd(TestXfrinState):
 
     def test_handle_out_of_sync(self):
         # getting SOA with an inconsistent serial.  This is an error.
-        self.conn._end_serial = 1235
+        self.conn._end_serial = isc.dns.Serial(1235)
         self.assertRaises(XfrinProtocolError, self.state.handle_rr,
                           self.conn, soa_rrset)
 
@@ -519,11 +560,24 @@ class TestXfrinIXFREnd(TestXfrinState):
     def test_finish_message(self):
         self.assertFalse(self.state.finish_message(self.conn))
 
+class TestXfrinIXFRUptodate(TestXfrinState):
+    def setUp(self):
+        super().setUp()
+        self.state = XfrinIXFRUptodate()
+
+    def test_handle_rr(self):
+        self.assertRaises(XfrinProtocolError, self.state.handle_rr, self.conn,
+                          self.ns_rrset)
+
+    def test_finish_message(self):
+        self.assertRaises(XfrinZoneUptodate, self.state.finish_message,
+                          self.conn)
+
 class TestXfrinAXFR(TestXfrinState):
     def setUp(self):
         super().setUp()
         self.state = XfrinAXFR()
-        self.conn._end_serial = 1234
+        self.conn._end_serial = isc.dns.Serial(1234)
 
     def test_handle_rr(self):
         """
@@ -598,7 +652,10 @@ class TestXfrinConnection(unittest.TestCase):
             'questions': [example_soa_question],
             'bad_qid': False,
             'response': True,
+            'auth': True,
             'rcode': Rcode.NOERROR(),
+            'answers': default_answers,
+            'authorities': [],
             'tsig': False,
             'axfr_after_soa': self._create_normal_response_data
             }
@@ -655,8 +712,11 @@ class TestXfrinConnection(unittest.TestCase):
         self.conn.reply_data = self.conn.create_response_data(
             bad_qid=self.soa_response_params['bad_qid'],
             response=self.soa_response_params['response'],
+            auth=self.soa_response_params['auth'],
             rcode=self.soa_response_params['rcode'],
             questions=self.soa_response_params['questions'],
+            answers=self.soa_response_params['answers'],
+            authorities=self.soa_response_params['authorities'],
             tsig_ctx=verify_ctx)
         if self.soa_response_params['axfr_after_soa'] != None:
             self.conn.response_generator = \
@@ -687,6 +747,15 @@ class TestXfrinConnection(unittest.TestCase):
         rrset.add_rdata(Rdata(RRType.NS(), TEST_RRCLASS, nsname))
         return rrset
 
+    def _set_test_zone(self, zone_name):
+        '''Set the zone name for transfer to the specified one.
+
+        It also makes sure that the SOA RR (if it exists) is correctly (re)set.
+
+        '''
+        self.conn._zone_name = zone_name
+        self.conn._zone_soa = self.conn._get_zone_soa()
+
 class TestAXFR(TestXfrinConnection):
     def setUp(self):
         super().setUp()
@@ -781,25 +850,26 @@ class TestAXFR(TestXfrinConnection):
         # IXFR query
         msg = self.conn._create_query(RRType.IXFR())
         check_query(RRType.IXFR(), begin_soa_rrset)
-        self.assertEqual(1230, self.conn._request_serial)
+        self.assertEqual(1230, self.conn._request_serial.get_value())
 
     def test_create_ixfr_query_fail(self):
         # In these cases _create_query() will fail to find a valid SOA RR to
         # insert in the IXFR query, and should raise an exception.
 
-        self.conn._zone_name = Name('no-such-zone.example')
+        self._set_test_zone(Name('no-such-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('partial-match-zone.example')
+        self._set_test_zone(Name('partial-match-zone.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('no-soa.example')
+        self._set_test_zone(Name('no-soa.example'))
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
-        self.conn._zone_name = Name('dup-soa.example')
+        self._set_test_zone(Name('dup-soa.example'))
+        self.conn._zone_soa = self.conn._get_zone_soa()
         self.assertRaises(XfrinException, self.conn._create_query,
                           RRType.IXFR())
 
@@ -830,8 +900,10 @@ class TestAXFR(TestXfrinConnection):
         self.conn._tsig_key = TSIG_KEY
         # server tsig check fail, return with RCODE 9 (NOTAUTH)
         self.conn._send_query(RRType.SOA())
-        self.conn.reply_data = self.conn.create_response_data(rcode=Rcode.NOTAUTH())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.conn.reply_data = \
+            self.conn.create_response_data(rcode=Rcode.NOTAUTH())
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_without_end_soa(self):
         self.conn._send_query(RRType.AXFR())
@@ -844,7 +916,8 @@ class TestAXFR(TestXfrinConnection):
     def test_response_bad_qid(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_error_code_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -855,7 +928,7 @@ class TestAXFR(TestXfrinConnection):
                 rcode=Rcode.SERVFAIL())
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._handle_xfrin_responses)
 
@@ -867,7 +940,7 @@ class TestAXFR(TestXfrinConnection):
         self.conn.reply_data = self.conn.create_response_data(bad_qid=True)
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADKEY",
                                self.conn._handle_xfrin_responses)
 
@@ -880,18 +953,21 @@ class TestAXFR(TestXfrinConnection):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             rcode=Rcode.SERVFAIL())
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_multi_question(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(
             questions=[example_axfr_question, example_axfr_question])
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_response_non_response(self):
         self.conn._send_query(RRType.AXFR())
         self.conn.reply_data = self.conn.create_response_data(response = False)
-        self.assertRaises(XfrinException, self.conn._handle_xfrin_responses)
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
 
     def test_soacheck(self):
         # we need to defer the creation until we know the QID, which is
@@ -906,7 +982,7 @@ class TestAXFR(TestXfrinConnection):
     def test_soacheck_badqid(self):
         self.soa_response_params['bad_qid'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_bad_qid_bad_sig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -916,19 +992,123 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         # xfrin should check TSIG before other part of incoming message
         # validate log message for XfrinException
-        self.__match_exception(XfrinException,
+        self.__match_exception(XfrinProtocolError,
                                "TSIG verify fail: BADSIG",
                                self.conn._check_soa_serial)
 
     def test_soacheck_non_response(self):
         self.soa_response_params['response'] = False
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_error_code(self):
         self.soa_response_params['rcode'] = Rcode.SERVFAIL()
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_notauth(self):
+        self.soa_response_params['auth'] = False
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate(self):
+        # Primary's SOA serial is identical the local serial
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate2(self):
+        # Primary's SOA serial is "smaller" than the local serial
+        self.soa_response_params['answers'] = [create_soa(1229)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_uptodate3(self):
+        # Similar to the previous case, but checking the comparison is based
+        # on the serial number arithmetic.
+        self.soa_response_params['answers'] = [create_soa(0xffffffff)]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertRaises(XfrinZoneUptodate, self.conn._check_soa_serial)
+
+    def test_soacheck_newzone(self):
+        # Primary's SOA is 'old', but this secondary doesn't know anything
+        # about the zone yet, so it should accept it.
+        def response_generator():
+            # _request_serial is set in _check_soa_serial().  Reset it here.
+            self.conn._request_serial = None
+            self._create_soa_response_data()
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = response_generator
+        self.assertEqual(XFRIN_OK, self.conn._check_soa_serial())
+
+    def test_soacheck_question_empty(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(Name('example.org'),
+                                                          TEST_RRCLASS,
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          RRClass.CH(),
+                                                          RRType.SOA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_question_type_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['questions'] = [Question(TEST_ZONE_NAME,
+                                                          TEST_RRCLASS,
+                                                          RRType.AAAA())]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_no_soa(self):
+        # The response just doesn't contain SOA without any other indication
+        # of errors.
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_name_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [create_soa(1234,
+                                                          Name('example.org'))]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_soa_class_mismatch(self):
+        self.conn.response_generator = self._create_soa_response_data
+        soa = RRset(TEST_ZONE_NAME, RRClass.CH(), RRType.SOA(), RRTTL(0))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.CH(), 'm. r. 1234 0 0 0 0'))
+        self.soa_response_params['answers'] = [soa]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_multiple_soa(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = [soa_rrset, soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_cname_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        # Add SOA to the answer, too, to make sure that it doesn't deceive the parser
+        self.soa_response_params['answers'] = [soa_rrset, create_cname()]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_referral_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [create_ns('ns.example.com')]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
+
+    def test_soacheck_nodata_response(self):
+        self.conn.response_generator = self._create_soa_response_data
+        self.soa_response_params['answers'] = []
+        self.soa_response_params['authorities'] = [soa_rrset]
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig(self):
         # Use a mock tsig context emulating a validly signed response
@@ -947,7 +1127,7 @@ class TestAXFR(TestXfrinConnection):
         self.soa_response_params['rcode'] = Rcode.NOTAUTH()
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_noerror_badsig(self):
         self.conn._tsig_key = TSIG_KEY
@@ -960,7 +1140,7 @@ class TestAXFR(TestXfrinConnection):
         # treat this as a final failure (just as BIND 9 does).
         self.conn.response_generator = self._create_soa_response_data
 
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_tsig_unsigned_response(self):
         # we can use a real TSIGContext for this.  the response doesn't
@@ -969,14 +1149,14 @@ class TestAXFR(TestXfrinConnection):
         # it as a fatal transaction failure, too.
         self.conn._tsig_key = TSIG_KEY
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_soacheck_with_unexpected_tsig_response(self):
         # we reject unexpected TSIG in responses (following BIND 9's
         # behavior)
         self.soa_response_params['tsig'] = True
         self.conn.response_generator = self._create_soa_response_data
-        self.assertRaises(XfrinException, self.conn._check_soa_serial)
+        self.assertRaises(XfrinProtocolError, self.conn._check_soa_serial)
 
     def test_response_shutdown(self):
         self.conn.response_generator = self._create_normal_response_data
@@ -1238,6 +1418,18 @@ class TestAXFR(TestXfrinConnection):
         self.conn.response_generator = self._create_soa_response_data
         self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
 
+    def test_do_soacheck_uptodate(self):
+        self.soa_response_params['answers'] = [begin_soa_rrset]
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_OK)
+
+    def test_do_soacheck_protocol_error(self):
+        # There are several cases, but at this level it's sufficient to check
+        # only one.  We use the case where there's no SOA in the response.
+        self.soa_response_params['answers'] = []
+        self.conn.response_generator = self._create_soa_response_data
+        self.assertEqual(self.conn.do_xfrin(True), XFRIN_FAIL)
+
     def test_do_soacheck_and_xfrin_with_tsig(self):
         # We are going to have a SOA query/response transaction, followed by
         # AXFR, all TSIG signed.  xfrin should use a new TSIG context for
@@ -1270,9 +1462,8 @@ class TestIXFRResponse(TestXfrinConnection):
     def setUp(self):
         super().setUp()
         self.conn._query_id = self.conn.qid = 1035
-        self.conn._request_serial = 1230
+        self.conn._request_serial = isc.dns.Serial(1230)
         self.conn._request_type = RRType.IXFR()
-        self._zone_name = TEST_ZONE_NAME
         self.conn._datasrc_client = MockDataSourceClient()
         XfrinInitialSOA().set_xfrstate(self.conn, XfrinInitialSOA())
 
@@ -1347,6 +1538,16 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate(self):
+        '''IXFR response indicates the zone is new enough'''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset])
+        self.assertRaises(XfrinZoneUptodate, self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_response_broken(self):
         '''Test with a broken response.
 
@@ -1379,6 +1580,22 @@ class TestIXFRResponse(TestXfrinConnection):
                     [[('delete', begin_soa_rrset), ('add', soa_rrset)]],
                     self.conn._datasrc_client.committed_diffs)
 
+    def test_ixfr_response_uptodate_extra(self):
+        '''Similar to 'uptodate' test, but with extra bogus data.
+
+        In either case an exception will be raised, but in this case it's
+        considered an error.
+
+        '''
+        self.conn.reply_data = self.conn.create_response_data(
+            questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS, RRType.IXFR())],
+            answers=[begin_soa_rrset, soa_rrset])
+        self.assertRaises(XfrinProtocolError,
+                          self.conn._handle_xfrin_responses)
+        # no diffs should have been committed
+        check_diffs(self.assertEqual,
+                    [], self.conn._datasrc_client.committed_diffs)
+
     def test_ixfr_to_axfr_response(self):
         '''AXFR-style IXFR response.
 
@@ -1482,13 +1699,25 @@ class TestIXFRSession(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
-    def test_do_xfrin_fail(self):
+    def test_do_xfrin_fail2(self):
         '''IXFR fails due to a bogus DNS message.
 
         '''
         self._create_broken_response_data()
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
 
+    def test_do_xfrin_uptodate(self):
+        '''IXFR is (gracefully) aborted because serial is not new
+
+        '''
+        def create_response():
+            self.conn.reply_data = self.conn.create_response_data(
+                questions=[Question(TEST_ZONE_NAME, TEST_RRCLASS,
+                                    RRType.IXFR())],
+                answers=[begin_soa_rrset])
+        self.conn.response_generator = create_response
+        self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
+
 class TestXFRSessionWithSQLite3(TestXfrinConnection):
     '''Tests for XFR sessions using an SQLite3 DB.
 
@@ -1522,8 +1751,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def get_zone_serial(self):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA(),
-                                  None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(TEST_ZONE_NAME, RRType.SOA())
         self.assertEqual(ZoneFinder.SUCCESS, result)
         self.assertEqual(1, soa.get_rdata_count())
         return get_soa_serial(soa.get_rdata()[0])
@@ -1531,7 +1759,7 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
     def record_exist(self, name, type):
         result, finder = self.conn._datasrc_client.find_zone(TEST_ZONE_NAME)
         self.assertEqual(DataSourceClient.SUCCESS, result)
-        result, soa = finder.find(name, type, None, ZoneFinder.FIND_DEFAULT)
+        result, soa = finder.find(name, type)
         return result == ZoneFinder.SUCCESS
 
     def test_do_ixfrin_sqlite3(self):
@@ -1543,9 +1771,9 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_ixfr_response
 
         # Confirm xfrin succeeds and SOA is updated
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
 
         # Also confirm the corresponding diffs are stored in the diffs table
         conn = sqlite3.connect(self.sqlite3db_obj)
@@ -1574,12 +1802,12 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                          self._create_soa('1235')])
         self.conn.response_generator = create_ixfr_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
 
     def test_do_ixfrin_nozone_sqlite3(self):
-        self.conn._zone_name = Name('nosuchzone.example')
+        self._set_test_zone(Name('nosuchzone.example'))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, RRType.IXFR()))
         # This should fail even before starting state transition
         self.assertEqual(None, self.conn.get_xfrstate())
@@ -1595,11 +1823,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
         self.conn.response_generator = create_response
 
         # Confirm xfrin succeeds and SOA is updated, A RR is deleted.
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, type))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 
@@ -1627,11 +1855,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                 answers=[soa_rrset, self._create_ns(), soa_rrset, soa_rrset])
         self.conn.response_generator = create_response
 
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
         self.assertEqual(XFRIN_FAIL, self.conn.do_xfrin(False, type))
-        self.assertEqual(1230, self.get_zone_serial())
+        self.assertEqual(1230, self.get_zone_serial().get_value())
         self.assertTrue(self.record_exist(Name('dns01.example.com'),
                                           RRType.A()))
 
@@ -1665,11 +1893,11 @@ class TestXFRSessionWithSQLite3(TestXfrinConnection):
                                     RRType.AXFR())],
                 answers=[soa_rrset, self._create_ns(), soa_rrset])
         self.conn.response_generator = create_response
-        self.conn._zone_name = Name('example.com')
+        self._set_test_zone(Name('example.com'))
         self.assertEqual(XFRIN_OK, self.conn.do_xfrin(False, RRType.AXFR()))
         self.assertEqual(type(XfrinAXFREnd()),
                          type(self.conn.get_xfrstate()))
-        self.assertEqual(1234, self.get_zone_serial())
+        self.assertEqual(1234, self.get_zone_serial().get_value())
         self.assertFalse(self.record_exist(Name('dns01.example.com'),
                                            RRType.A()))
 
diff --git a/src/bin/xfrin/xfrin.py.in b/src/bin/xfrin/xfrin.py.in
index 445683e..1167bef 100755
--- a/src/bin/xfrin/xfrin.py.in
+++ b/src/bin/xfrin/xfrin.py.in
@@ -24,6 +24,7 @@ import struct
 import threading
 import socket
 import random
+from functools import reduce
 from optparse import OptionParser, OptionValueError
 from isc.config.ccsession import *
 from isc.notify import notify_out
@@ -75,9 +76,10 @@ DEFAULT_MASTER_PORT = 53
 DEFAULT_ZONE_CLASS = RRClass.IN()
 
 __version__ = 'BIND10'
-# define xfrin rcode
-XFRIN_OK = 0
-XFRIN_FAIL = 1
+
+# Internal result codes of an xfr session
+XFRIN_OK = 0                    # normal success
+XFRIN_FAIL = 1                  # general failure (internal/external)
 
 class XfrinException(Exception):
     pass
@@ -87,6 +89,11 @@ class XfrinProtocolError(Exception):
     '''
     pass
 
+class XfrinZoneUptodate(Exception):
+    '''Raised when an xfr session finds the zone is already up to date.
+
+    It is caught in do_xfrin(), which then terminates the session as a
+    success without updating the zone.
+    '''
+    pass
+
 class XfrinZoneInfoException(Exception):
     """This exception is raised if there is an error in the given
        configuration (part), or when a command does not have a required
@@ -153,7 +160,7 @@ def format_addrinfo(addrinfo):
                         "appear to be consisting of (family, socktype, (addr, port))")
 
 def get_soa_serial(soa_rdata):
-    '''Extract the serial field of an SOA RDATA and returns it as an intger.
+    '''Extract the serial field of SOA RDATA and return it as a Serial object.
 
     We don't have to be very efficient here, so we first dump the entire RDATA
     as a string and convert the first corresponding field.  This should be
@@ -162,7 +169,7 @@ def get_soa_serial(soa_rdata):
     should be a more direct and convenient way to get access to the SOA
     fields.
     '''
-    return int(soa_rdata.to_text().split()[2])
+    return Serial(int(soa_rdata.to_text().split()[2]))
 
 class XfrinState:
     '''
@@ -181,12 +188,12 @@ class XfrinState:
                              (AXFR or
             (recv SOA)        AXFR-style IXFR)  (SOA, add)
     InitialSOA------->FirstData------------->AXFR--------->AXFREnd
-                          |                  |  ^         (post xfr
-                          |                  |  |        checks, then
-                          |                  +--+        commit)
-                          |            (non SOA, add)
-                          |
-                          |                     (non SOA, delete)
+         |                |                  |  ^         (post xfr
+         |(IXFR &&        |                  |  |        checks, then
+         | recv SOA       |                  +--+        commit)
+         | not new)       |            (non SOA, add)
+         V                |
+    IXFRUptodate          |                     (non SOA, delete)
                (pure IXFR,|                           +-------+
             keep handling)|             (Delete SOA)  V       |
                           + ->IXFRDeleteSOA------>IXFRDelete--+
@@ -300,13 +307,14 @@ class XfrinInitialSOA(XfrinState):
                                      + rr.get_type().to_text() + ' received)')
         conn._end_serial = get_soa_serial(rr.get_rdata()[0])
 
-        # FIXME: we need to check the serial is actually greater than ours.
-        # To do so, however, we need to implement serial number arithmetic.
-        # Although it wouldn't be a big task, we'll leave it for a separate
-        # task for now.  (Always performing xfr could be inefficient, but
-        # shouldn't do any harm otherwise)
+        if conn._request_type == RRType.IXFR() and \
+                conn._end_serial <= conn._request_serial:
+            logger.info(XFRIN_IXFR_UPTODATE, conn.zone_str(),
+                        conn._request_serial, conn._end_serial)
+            self.set_xfrstate(conn, XfrinIXFRUptodate())
+        else:
+            self.set_xfrstate(conn, XfrinFirstData())
 
-        self.set_xfrstate(conn, XfrinFirstData())
         return True
 
 class XfrinFirstData(XfrinState):
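The check above compares Serial objects rather than plain integers, i.e. it
relies on DNS serial number arithmetic (RFC 1982), which stays correct across
the 32-bit wrap-around.  A minimal, self-contained sketch of the intended
comparison semantics (an illustration only, not the library's Serial
implementation):

    def serial_lt(s1, s2):
        # RFC 1982 'less than' for 32-bit serials: s1 < s2 iff the forward
        # distance from s1 to s2 (mod 2^32) is between 1 and 2^31 - 1.
        return s1 != s2 and ((s2 - s1) % (1 << 32)) < (1 << 31)

    assert serial_lt(1230, 1234)        # 1230 is older than 1234
    assert serial_lt(4294967295, 0)     # 0 is 'newer' after wrap-around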
@@ -430,6 +438,14 @@ class XfrinIXFREnd(XfrinState):
         '''
         return False
 
+class XfrinIXFRUptodate(XfrinState):
+    def handle_rr(self, conn, rr):
+        raise XfrinProtocolError('Extra data after single IXFR response ' +
+                                 rr.to_text())
+
+    def finish_message(self, conn):
+        raise XfrinZoneUptodate
+
 class XfrinAXFR(XfrinState):
     def handle_rr(self, conn, rr):
         """
@@ -473,10 +489,13 @@ class XfrinConnection(asyncore.dispatcher):
 
     def __init__(self,
                  sock_map, zone_name, rrclass, datasrc_client,
-                 shutdown_event, master_addrinfo, tsig_key=None,
+                 shutdown_event, master_addrinfo, db_file, tsig_key=None,
                  idle_timeout=60):
         '''Constructor of the XfrinConnection class.
 
+        db_file: SQLite3 DB file.  Unfortunately we still need this for
+                 a temporary workaround in _get_zone_soa().  This should be
+                 removed when we eliminate the need for the workaround.
         idle_timeout: max idle time for read data from socket.
         datasrc_client: the data source client object used for the XFR session.
                         This will eventually replace db_file completely.
@@ -500,7 +519,9 @@ class XfrinConnection(asyncore.dispatcher):
         self._rrclass = rrclass
 
         # Data source handler
+        self._db_file = db_file
         self._datasrc_client = datasrc_client
+        self._zone_soa = self._get_zone_soa()
 
         self._sock_map = sock_map
         self._soa_rr_count = 0
@@ -524,6 +545,55 @@ class XfrinConnection(asyncore.dispatcher):
         self.create_socket(self._master_addrinfo[0], self._master_addrinfo[1])
         self.setblocking(1)
 
+    def _get_zone_soa(self):
+        '''Retrieve the current SOA RR of the zone to be transferred.
+
+        It will be used for various purposes in subsequent xfr protocol
+        processing.  It is perfectly possible that the zone is currently
+        empty and therefore doesn't have an SOA, so this method doesn't
+        consider that an error and returns None in such a case.  That may
+        or may not result in a failure in the actual processing, depending
+        on how the SOA is used.
+
+        When the zone has an SOA RR, this method makes sure that it's
+        valid, i.e., that it has exactly one RDATA; if that is not the
+        case this method returns None.
+
+        If the underlying data source doesn't even know the zone, this method
+        tries to provide backward compatible behavior where xfrin is
+        responsible for creating the zone in the corresponding DB table.
+        In the longer term we should deprecate this behavior by introducing
+        a more generic zone management framework, but at the moment we try
+        not to surprise existing users.  (Note also that the part providing
+        the compatible behavior uses the old data source API.  We'll
+        deprecate this API in the near future, too.)
+
+        '''
+        # get the zone finder.  this must be SUCCESS (not even
+        # PARTIALMATCH) because we are specifying the zone origin name.
+        result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            # The data source doesn't know the zone.  For now, we provide
+            # backward compatibility and create a new one ourselves.
+            isc.datasrc.sqlite3_ds.load(self._db_file,
+                                        self._zone_name.to_text(),
+                                        lambda : [])
+            logger.warn(XFRIN_ZONE_CREATED, self.zone_str())
+            # try again
+            result, finder = self._datasrc_client.find_zone(self._zone_name)
+        if result != DataSourceClient.SUCCESS:
+            return None
+        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
+                                        None, ZoneFinder.FIND_DEFAULT)
+        if result != ZoneFinder.SUCCESS:
+            logger.info(XFRIN_ZONE_NO_SOA, self.zone_str())
+            return None
+        if soa_rrset.get_rdata_count() != 1:
+            logger.warn(XFRIN_ZONE_MULTIPLE_SOA, self.zone_str(),
+                        soa_rrset.get_rdata_count())
+            return None
+        return soa_rrset
+
     def __set_xfrstate(self, new_state):
         self.__state = new_state
 
@@ -545,37 +615,16 @@ class XfrinConnection(asyncore.dispatcher):
                          str(e))
             return False
 
-    def _get_zone_soa(self):
-        result, finder = self._datasrc_client.find_zone(self._zone_name)
-        if result != DataSourceClient.SUCCESS:
-            raise XfrinException('Zone not found in the given data ' +
-                                 'source: ' + self.zone_str())
-        result, soa_rrset = finder.find(self._zone_name, RRType.SOA(),
-                                        None, ZoneFinder.FIND_DEFAULT)
-        if result != ZoneFinder.SUCCESS:
-            raise XfrinException('SOA RR not found in zone: ' +
-                                 self.zone_str())
-        # Especially for database-based zones, a working zone may be in
-        # a broken state where it has more than one SOA RR.  We proactively
-        # check the condition and abort the xfr attempt if we identify it.
-        if soa_rrset.get_rdata_count() != 1:
-            raise XfrinException('Invalid number of SOA RRs for ' +
-                                 self.zone_str() + ': ' +
-                                 str(soa_rrset.get_rdata_count()))
-        return soa_rrset
-
     def _create_query(self, query_type):
         '''Create an XFR-related query message.
 
-        query_type is either SOA, AXFR or IXFR.  For type IXFR, it searches
-        the associated data source for the current SOA record to include
-        it in the query.  If the corresponding zone or the SOA record
-        cannot be found, it raises an XfrinException exception.  Note that
-        this may not necessarily a broken configuration; for the first attempt
-        of transfer the secondary may not have any boot-strap zone
-        information, in which case IXFR simply won't work.  The xfrin
-        should then fall back to AXFR.  _request_serial is recorded for
-        later use.
+        query_type is either SOA, AXFR or IXFR.  An IXFR query needs the
+        zone's current SOA record.  If it's not known, an XfrinException
+        is raised.  Note that this does not necessarily indicate a broken
+        configuration; on the first transfer attempt the secondary may not
+        have any bootstrap zone information, in which case IXFR simply
+        won't work.  The xfrin should then fall back to AXFR.
+        _request_serial is recorded for later use.
 
         '''
         msg = Message(Message.RENDER)
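For reference, the IXFR query being built here ends up carrying the question
for the zone with type IXFR plus the client's current SOA in the authority
section, and that SOA's serial is what _request_serial is set to.  A rough
standalone sketch (assuming the pydnspp classes used elsewhere in this module;
the zone name and SOA values below are made up for illustration):

    from pydnspp import (Message, Opcode, Rcode, Question, Name, RRClass,
                         RRType, RRset, RRTTL, Rdata)

    zone = Name('example.com')
    query = Message(Message.RENDER)
    query.set_opcode(Opcode.QUERY())
    query.set_rcode(Rcode.NOERROR())
    query.add_question(Question(zone, RRClass.IN(), RRType.IXFR()))

    # The client's current SOA (serial 1230 here) goes into the authority
    # section; its serial is what _request_serial would record.
    soa = RRset(zone, RRClass.IN(), RRType.SOA(), RRTTL(3600))
    soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
                        'ns.example.com. admin.example.com. '
                        '1230 3600 1800 2419200 7200'))
    query.add_rrset(Message.SECTION_AUTHORITY, soa)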
@@ -585,27 +634,19 @@ class XfrinConnection(asyncore.dispatcher):
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
         msg.add_question(Question(self._zone_name, self._rrclass, query_type))
+
+        # Remember our serial, if known
+        self._request_serial = get_soa_serial(self._zone_soa.get_rdata()[0]) \
+            if self._zone_soa is not None else None
+
+        # Set the authority section with our SOA for IXFR
         if query_type == RRType.IXFR():
-            # get the zone finder.  this must be SUCCESS (not even
-            # PARTIALMATCH) because we are specifying the zone origin name.
-            zone_soa_rr = self._get_zone_soa()
-            msg.add_rrset(Message.SECTION_AUTHORITY, zone_soa_rr)
-            self._request_serial = get_soa_serial(zone_soa_rr.get_rdata()[0])
-        else:
-            # For AXFR, we temporarily provide backward compatible behavior
-            # where xfrin is responsible for creating zone in the corresponding
-            # DB table.  Note that the code below uses the old data source
-            # API and assumes SQLite3 in an ugly manner.  We'll have to
-            # develop a better way of managing zones in a generic way and
-            # eliminate the code like the one here.
-            try:
-                self._get_zone_soa()
-            except XfrinException:
-                def empty_rr_generator():
-                    return []
-                isc.datasrc.sqlite3_ds.load(self._db_file,
-                                            self._zone_name.to_text(),
-                                            empty_rr_generator)
+            if self._zone_soa is None:
+                # (incremental) IXFR doesn't work without a known SOA
+                raise XfrinException('Failed to create IXFR query due to no ' +
+                                     'SOA for ' + self.zone_str())
+            msg.add_rrset(Message.SECTION_AUTHORITY, self._zone_soa)
+
         return msg
 
     def _send_data(self, data):
@@ -659,7 +700,8 @@ class XfrinConnection(asyncore.dispatcher):
         if self._tsig_ctx is not None:
             tsig_error = self._tsig_ctx.verify(tsig_record, response_data)
             if tsig_error != TSIGError.NOERROR:
-                raise XfrinException('TSIG verify fail: %s' % str(tsig_error))
+                raise XfrinProtocolError('TSIG verify fail: %s' %
+                                         str(tsig_error))
         elif tsig_record is not None:
             # If the response includes a TSIG while we didn't sign the query,
             # we treat it as an error.  RFC doesn't say anything about this
@@ -668,13 +710,78 @@ class XfrinConnection(asyncore.dispatcher):
             # implementation would return such a response, and since this is
             # part of security mechanism, it's probably better to be more
             # strict.
-            raise XfrinException('Unexpected TSIG in response')
+            raise XfrinProtocolError('Unexpected TSIG in response')
+
+    def __parse_soa_response(self, msg, response_data):
+        '''Parse a response to an SOA query and extract the answer SOA.
+
+        This is a subroutine of _check_soa_serial().  This method also
+        validates the message, and rejects bogus responses with
+        XfrinProtocolError.
+
+        If everything is okay, it returns the SOA RR from the answer section
+        of the response.
+
+        '''
+        # Check TSIG integrity and validate the header.  Unlike AXFR/IXFR,
+        # we should be more strict for SOA queries and check the AA flag, too.
+        self._check_response_tsig(msg, response_data)
+        self._check_response_header(msg)
+        if not msg.get_header_flag(Message.HEADERFLAG_AA):
+            raise XfrinProtocolError('non-authoritative answer to SOA query')
+
+        # Validate the question section
+        n_question = msg.get_rr_count(Message.SECTION_QUESTION)
+        if n_question != 1:
+            raise XfrinProtocolError('Invalid response to SOA query: ' +
+                                     '(' + str(n_question) + ' questions, 1 ' +
+                                     'expected)')
+        resp_question = msg.get_question()[0]
+        if resp_question.get_name() != self._zone_name or \
+                resp_question.get_class() != self._rrclass or \
+                resp_question.get_type() != RRType.SOA():
+            raise XfrinProtocolError('Invalid response to SOA query: '
+                                     'question mismatch: ' +
+                                     str(resp_question))
+
+        # Look into the answer section for SOA
+        soa = None
+        for rr in msg.get_section(Message.SECTION_ANSWER):
+            if rr.get_type() == RRType.SOA():
+                if soa is not None:
+                    raise XfrinProtocolError('SOA response had multiple SOAs')
+                soa = rr
+            # There should not be a CNAME record at top of zone.
+            if rr.get_type() == RRType.CNAME():
+                raise XfrinProtocolError('SOA query resulted in CNAME')
+
+        # If no SOA was found, try to figure out the reason and report it.
+        if soa is None:
+            # Look into the authority section for a hint of the reason:
+            # an NS record suggests a referral, an SOA suggests NODATA.
+            for rr in msg.get_section(Message.SECTION_AUTHORITY):
+                if rr.get_type() == RRType.NS():
+                    raise XfrinProtocolError('SOA query resulted in referral')
+                if rr.get_type() == RRType.SOA():
+                    raise XfrinProtocolError('SOA query resulted in NODATA')
+            raise XfrinProtocolError('No SOA record found in response to ' +
+                                     'SOA query')
+
+        # Check if the SOA is really what we asked for
+        if soa.get_name() != self._zone_name or \
+                soa.get_class() != self._rrclass:
+            raise XfrinProtocolError("SOA response doesn't match query: " +
+                                     str(soa))
+
+        # All okay, return it
+        return soa
+
 
     def _check_soa_serial(self):
-        ''' Compare the soa serial, if soa serial in master is less than
-        the soa serial in local, Finish xfrin.
-        False: soa serial in master is less or equal to the local one.
-        True: soa serial in master is bigger
+        '''Send an SOA query and compare the local and remote serials.
+
+        If we know our local serial and the remote serial isn't newer
+        than ours, we abort the session by raising XfrinZoneUptodate.
+        On success it returns XFRIN_OK, but only for testing; the caller
+        doesn't use the return value.
+
         '''
 
         self._send_query(RRType.SOA())
@@ -682,18 +789,23 @@ class XfrinConnection(asyncore.dispatcher):
         msg_len = socket.htons(struct.unpack('H', data_len)[0])
         soa_response = self._get_request_response(msg_len)
         msg = Message(Message.PARSE)
-        msg.from_wire(soa_response)
+        msg.from_wire(soa_response, Message.PRESERVE_ORDER)
+
+        # Validate/parse the rest of the response, and extract the SOA
+        # from the answer section
+        soa = self.__parse_soa_response(msg, soa_response)
+
+        # Compare the two serials.  If ours isn't older than the primary's,
+        # abort with XfrinZoneUptodate.
+        primary_serial = get_soa_serial(soa.get_rdata()[0])
+        if self._request_serial is not None and \
+                self._request_serial >= primary_serial:
+            if self._request_serial != primary_serial:
+                logger.info(XFRIN_ZONE_SERIAL_AHEAD, primary_serial,
+                            self.zone_str(),
+                            format_addrinfo(self._master_addrinfo),
+                            self._request_serial)
+            raise XfrinZoneUptodate
 
-        # TSIG related checks, including an unexpected signed response
-        self._check_response_tsig(msg, soa_response)
-
-        # perform some minimal level validation.  It's an open issue how
-        # strict we should be (see the comment in _check_response_header())
-        self._check_response_header(msg)
-
-        # TODO, need select soa record from data source then compare the two
-        # serial, current just return OK, since this function hasn't been used
-        # now.
         return XFRIN_OK
 
     def do_xfrin(self, check_soa, request_type=RRType.AXFR()):
@@ -704,22 +816,30 @@ class XfrinConnection(asyncore.dispatcher):
             self._request_type = request_type
             # Right now RRType.[IA]XFR().to_text() is 'TYPExxx', so we need
             # to hardcode here.
-            request_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
+            req_str = 'IXFR' if request_type == RRType.IXFR() else 'AXFR'
             if check_soa:
-                ret =  self._check_soa_serial()
-
-            if ret == XFRIN_OK:
-                logger.info(XFRIN_XFR_TRANSFER_STARTED, request_str,
-                            self.zone_str())
-                self._send_query(self._request_type)
-                self.__state = XfrinInitialSOA()
-                self._handle_xfrin_responses()
-                logger.info(XFRIN_XFR_TRANSFER_SUCCESS, request_str,
-                            self.zone_str())
-
-        except (XfrinException, XfrinProtocolError) as e:
-            logger.error(XFRIN_XFR_TRANSFER_FAILURE, request_str,
-                         self.zone_str(), str(e))
+                self._check_soa_serial()
+
+            logger.info(XFRIN_XFR_TRANSFER_STARTED, req_str, self.zone_str())
+            self._send_query(self._request_type)
+            self.__state = XfrinInitialSOA()
+            self._handle_xfrin_responses()
+            logger.info(XFRIN_XFR_TRANSFER_SUCCESS, req_str, self.zone_str())
+
+        except XfrinZoneUptodate:
+            # Eventually we'll probably have to treat this case as a trigger
+            # of trying another primary server, etc, but for now we treat it
+            # as "success".
+            pass
+        except XfrinProtocolError as e:
+            logger.info(XFRIN_XFR_TRANSFER_PROTOCOL_ERROR, req_str,
+                        self.zone_str(),
+                        format_addrinfo(self._master_addrinfo), str(e))
+            ret = XFRIN_FAIL
+        except XfrinException as e:
+            logger.error(XFRIN_XFR_TRANSFER_FAILURE, req_str,
+                         self.zone_str(),
+                         format_addrinfo(self._master_addrinfo), str(e))
             ret = XFRIN_FAIL
         except Exception as e:
             # Catching all possible exceptions like this is generally not a
@@ -730,7 +850,7 @@ class XfrinConnection(asyncore.dispatcher):
             # catch it here, but until then we need broadest coverage so that
             # we won't miss anything.
 
-            logger.error(XFRIN_XFR_OTHER_FAILURE, request_str,
+            logger.error(XFRIN_XFR_OTHER_FAILURE, req_str,
                          self.zone_str(), str(e))
             ret = XFRIN_FAIL
         finally:
@@ -754,13 +874,14 @@ class XfrinConnection(asyncore.dispatcher):
 
         msg_rcode = msg.get_rcode()
         if msg_rcode != Rcode.NOERROR():
-            raise XfrinException('error response: %s' % msg_rcode.to_text())
+            raise XfrinProtocolError('error response: %s' %
+                                     msg_rcode.to_text())
 
         if not msg.get_header_flag(Message.HEADERFLAG_QR):
-            raise XfrinException('response is not a response')
+            raise XfrinProtocolError('response is not a response')
 
         if msg.get_qid() != self._query_id:
-            raise XfrinException('bad query id')
+            raise XfrinProtocolError('bad query id')
 
     def _check_response_status(self, msg):
         '''Check validation of xfr response. '''
@@ -768,7 +889,7 @@ class XfrinConnection(asyncore.dispatcher):
         self._check_response_header(msg)
 
         if msg.get_rr_count(Message.SECTION_QUESTION) > 1:
-            raise XfrinException('query section count greater than 1')
+            raise XfrinProtocolError('query section count greater than 1')
 
     def _handle_xfrin_responses(self):
         read_next_msg = True
@@ -808,8 +929,8 @@ class XfrinConnection(asyncore.dispatcher):
         return False
 
 def __process_xfrin(server, zone_name, rrclass, db_file,
-                  shutdown_event, master_addrinfo, check_soa, tsig_key,
-                  request_type, conn_class):
+                    shutdown_event, master_addrinfo, check_soa, tsig_key,
+                    request_type, conn_class):
     conn = None
     exception = None
     ret = XFRIN_FAIL
@@ -840,11 +961,9 @@ def __process_xfrin(server, zone_name, rrclass, db_file,
         while retry:
             retry = False
             conn = conn_class(sock_map, zone_name, rrclass, datasrc_client,
-                              shutdown_event, master_addrinfo, tsig_key)
+                              shutdown_event, master_addrinfo, db_file,
+                              tsig_key)
             conn.init_socket()
-            # XXX: We still need _db_file for temporary workaround in _create_query().
-            # This should be removed when we eliminate the need for the workaround.
-            conn._db_file = db_file
             ret = XFRIN_FAIL
             if conn.connect_to_master():
                 ret = conn.do_xfrin(check_soa, request_type)
diff --git a/src/bin/xfrin/xfrin_messages.mes b/src/bin/xfrin/xfrin_messages.mes
index 86cdec3..5e182d8 100644
--- a/src/bin/xfrin/xfrin_messages.mes
+++ b/src/bin/xfrin/xfrin_messages.mes
@@ -15,18 +15,63 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrin messages python module.
 
+% XFRIN_ZONE_CREATED Zone %1 not found in the given data source, newly created
+On starting an xfrin session, the zone to be transferred was not found
+in the data source.  This can happen if a secondary DNS server first
+tries to perform AXFR from a primary server without creating the zone
+image beforehand (e.g. by b10-loadzone).  As of this writing the xfrin
+process provides behavior backward compatible with previous versions:
+it creates a new zone in the data source so as not to surprise existing
+users too much.  This is probably not a good idea, however, in terms of
+who should be responsible for managing zones at a higher level.  In the
+future a separate zone management framework will more likely be
+provided, and the situation where the given zone isn't found in xfrin
+will be treated as an error.
+
+% XFRIN_ZONE_NO_SOA Zone %1 does not have SOA
+On starting an xfrin session, the zone to be transferred was found to
+have no SOA RR in the data source.  This is not necessarily an error;
+when a secondary DNS server performs its first transfer from a primary
+server, the zone can be empty and therefore has no SOA.  A subsequent
+AXFR will fill in the zone; if the attempt is IXFR it will fail at
+query creation.
+
+% XFRIN_ZONE_MULTIPLE_SOA Zone %1 has %2 SOA RRs
+On starting an xfrin session, the zone to be transferred was found to
+have multiple SOA RRs.  Such a zone is broken, but could be
+accidentally configured, especially in a data source using a "non
+captive" backend database.  The implementation ignores the SOA RRs
+entirely and tries to continue processing as if the zone were empty.
+This means a subsequent AXFR can succeed and possibly replace the zone
+with valid content, but an IXFR attempt will fail.
+
+% XFRIN_ZONE_SERIAL_AHEAD Serial number (%1) for %2 received from master %3 < ours (%4)
+The response to an SOA query prior to xfr indicated that the zone's
+SOA serial at the primary server is smaller than that of the xfrin
+client.  This is not necessarily an error, especially if that
+particular primary server is another secondary server which hasn't got
+the latest version of the zone yet.  But if the primary server is known
+to be the real source of the zone, some unexpected inconsistency may
+have happened, and you may want to take a closer look.  In this case
+xfrin doesn't perform the subsequent zone transfer.
+
 % XFRIN_XFR_OTHER_FAILURE %1 transfer of zone %2 failed: %3
 The XFR transfer for the given zone has failed due to a problem outside
 of the xfrin module.  Possible reasons are a broken DNS message or failure
 in database connection.  The error is shown in the log message.
 
-% XFRIN_AXFR_DATABASE_FAILURE AXFR transfer of zone %1 failed: %2
-The AXFR transfer for the given zone has failed due to a database problem.
-The error is shown in the log message.  Note: due to the code structure
-this can only happen for AXFR.
-
-% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 failed: %3
-The XFR transfer for the given zone has failed due to a protocol error.
+% XFRIN_XFR_TRANSFER_PROTOCOL_ERROR %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to a protocol
+error, such as an unexpected response from the primary server.  The
+error is shown in the log message.  It may be because the primary
+server implementation is broken or (although less likely) there was
+some attack attempt, but it can also happen due to a configuration
+mismatch, e.g. the remote server no longer has authority for the
+zone but the local configuration hasn't been updated.  So it is
+recommended to check the primary server configuration.
+
+% XFRIN_XFR_TRANSFER_FAILURE %1 transfer of zone %2 with %3 failed: %4
+The XFR transfer for the given zone has failed due to an internal error.
 The error is shown in the log message.
 
 % XFRIN_XFR_TRANSFER_FALLBACK falling back from IXFR to AXFR for %1
@@ -118,6 +163,16 @@ daemon will now shut down.
 An uncaught exception was raised while running the xfrin daemon. The
 exception message is printed in the log message.
 
+% XFRIN_IXFR_UPTODATE IXFR requested serial for %1 is %2, master has %3, not updating
+The first SOA record in an IXFR response indicates the zone's serial
+at the primary server is not newer than the client's.  This is
+basically an unexpected event because normally the client first checks
+the SOA serial by an SOA query, but it can still happen if the transfer
+is manually invoked or (although unlikely) there is a rapid change at
+the primary server between the SOA and IXFR queries.  The client
+implementation confirms the whole response consists of this single SOA,
+and aborts the transfer, treating it as a success.
+
 % XFRIN_GOT_INCREMENTAL_RESP got incremental response for %1
 In an attempt at IXFR processing, the beginning SOA of the first difference
 (following the initial SOA that specified the final SOA for all the
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 37e8993..ea4de27 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -67,10 +67,12 @@ class MySocket():
         self.sendqueue = self.sendqueue[size:]
         return result
 
-    def read_msg(self, parse_options=Message.PARSE_DEFAULT):
+    def read_msg(self, parse_options=Message.PARSE_DEFAULT, need_len=False):
         sent_data = self.readsent()
         get_msg = Message(Message.PARSE)
         get_msg.from_wire(bytes(sent_data[2:]), parse_options)
+        if need_len:
+            return (get_msg, len(sent_data) - 2)
         return get_msg
 
     def clear_send(self):
@@ -93,7 +95,7 @@ class MockDataSrcClient:
             return (isc.datasrc.DataSourceClient.NOTFOUND, None)
         return (isc.datasrc.DataSourceClient.SUCCESS, self)
 
-    def find(self, name, rrtype, target, options):
+    def find(self, name, rrtype, target=None, options=ZoneFinder.FIND_DEFAULT):
         '''Mock ZoneFinder.find().
 
         (At the moment) this method only handles query for type SOA.
@@ -863,7 +865,150 @@ class TestXfroutSession(TestXfroutSessionBase):
 
         self.assertEqual(len(expected_records), len(actual_records))
         for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+    def test_reply_xfrout_query_axfr_maxlen(self):
+        # The test RR(set) has the length of 65535 - 12 (size of hdr) bytes:
+        # owner name = 1 (root), fixed fields (type,class,TTL,RDLEN) = 10
+        # RDATA = 65512 (= 65535 - 12 - 1 - 10)
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65512)
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        # The first message should contain the beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The second message should contain the large test RR, and only that
+        # RR.  Its wire format data should have the maximum possible size.
+        r, rlen = self.sock.read_msg(need_len=True)
+        self.assertEqual(65535, rlen)
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(test_rr,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # The third message should contain the ending SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def maxlen_test_common_setup(self, tsig=False):
+        '''Common initialization for some of the tests below
+
+        For those tests we use '.' for all owner names and names in RDATA
+        to avoid having unexpected results due to compression.  It returns
+        the created SOA for convenience.
+
+        If tsig is True, also set up a (mock) TSIG context.  In our test
+        cases the size of the TSIG RR is 81 bytes (key name = example.com,
+        algorithm = hmac-md5).
+
+        '''
+        soa = RRset(Name('.'), RRClass.IN(), RRType.SOA(), RRTTL(3600))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(), '. . 0 0 0 0 0'))
+        self.mdata = self.create_request_data(zone_name=Name('.'))
+        self.xfrsess._soa = soa
+        if tsig:
+            self.xfrsess._tsig_ctx = \
+                self.create_mock_tsig_ctx(TSIGError.NOERROR)
+            self.xfrsess._tsig_len = 81
+        return soa
+
+    def maxlen_test_common_checks(self, soa_rr, test_rr, expected_n_rr):
+        '''A set of common assertion checks for some tests below.
+
+        In all cases two AXFR response messages should have been created.
+        expected_n_rr is a list of two elements, each specifies the expected
+        number of answer RRs for each message: expected_n_rr[0] is the expected
+        number of the first answer RRs; expected_n_rr[1] is the expected number
+        of the second answer RRs.  The message that contains two RRs should
+        have the maximum possible wire length (65535 bytes).  And, in all
+        cases, the resulting RRs should be in the order of SOA, another RR,
+        SOA.
+
+        '''
+        # Check the first message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[0] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[0],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs = r.get_section(Message.SECTION_ANSWER)[:]
+
+        # Check the second message
+        r, rlen = self.sock.read_msg(need_len=True)
+        if expected_n_rr[1] == 2:
+            self.assertEqual(65535, rlen)
+        self.assertEqual(expected_n_rr[1],
+                         r.get_rr_count(Message.SECTION_ANSWER))
+        actual_rrs.extend(r.get_section(Message.SECTION_ANSWER))
+        for (expected_rr, actual_rr) in zip([soa_rr, test_rr, soa_rr],
+                                            actual_rrs):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
+
+        # there should be no more message
+        self.assertEqual(0, len(self.sock.sendqueue))
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa(self):
+        # Similar to the 'maxlen' test, but the first message should be
+        # able to contain both SOA and the large RR.
+        soa = self.maxlen_test_common_setup()
+
+        # The first message will contain the question (5 bytes), so the
+        # test RDATA should allow a room for that.
+        test_rr = create_generic(Name('.'), 65512 - 5 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_soa_with_tsig(self):
+        # Similar to the previous case, but with TSIG (whose size is 81 bytes).
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 5 - 81 -
+                                 get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [2, 1])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa(self):
+        # Similar to the max w/ soa test, but the first message cannot contain
+        # both SOA and the long RR due to the question section.  The second
+        # message should be able to contain both.
+        soa = self.maxlen_test_common_setup()
+        test_rr = create_generic(Name('.'), 65512 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_maxlen_with_endsoa_with_tsig(self):
+        # Similar to the previous case, but with TSIG.
+        soa = self.maxlen_test_common_setup(True)
+        test_rr = create_generic(Name('.'), 65512 - 81 - get_rrset_len(soa))
+        self.xfrsess._iterator = [soa, test_rr]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
+        self.maxlen_test_common_checks(soa, test_rr, [1, 2])
+
+    def test_reply_xfrout_query_axfr_toobigdata(self):
+        # Similar to the 'maxlen' test, but the RR doesn't even fit in a
+        # single message.
+        self.xfrsess._soa = self.soa_rrset
+        test_rr = create_generic(Name('.'), 65513) # 1 byte larger than 'max'
+        self.xfrsess._iterator = [self.soa_rrset, test_rr]
+        # the reply method should fail with exception
+        self.assertRaises(XfroutSessionError, self.xfrsess._reply_xfrout_query,
+                          self.getmsg(), self.sock)
+        # The first message should still have been sent and contain the
+        # beginning SOA, and only that RR
+        r = self.sock.read_msg()
+        self.assertEqual(1, r.get_rr_count(Message.SECTION_ANSWER))
+        self.assertTrue(rrsets_equal(self.soa_rrset,
+                                     r.get_section(Message.SECTION_ANSWER)[0]))
+        # And there should have been no other messages sent
+        self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_reply_xfrout_query_ixfr_soa_only(self):
         # Creating an IXFR response that contains only one RR, which is the
@@ -875,7 +1020,8 @@ class TestXfroutSession(TestXfroutSessionBase):
         reply_msg = self.sock.read_msg(Message.PRESERVE_ORDER)
         answer = reply_msg.get_section(Message.SECTION_ANSWER)
         self.assertEqual(1, len(answer))
-        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answer[0])
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answer[0]))
 
 class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
     '''Tests for XFR-out sessions using an SQLite3 DB.
@@ -899,14 +1045,23 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         # This zone contains two A RRs for the same name with different TTLs.
         # These TTLs should be preserved in the AXFR stream.
         actual_records = response.get_section(Message.SECTION_ANSWER)
-        expected_records = [create_soa(2011112001),
-                            create_ns(self.ns_name),
-                            create_a(Name(self.ns_name), '192.0.2.1', 3600),
-                            create_a(Name(self.ns_name), '192.0.2.2', 7200),
-                            create_soa(2011112001)]
-        self.assertEqual(len(expected_records), len(actual_records))
-        for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+        self.assertEqual(5, len(actual_records))
+        # The first and last RR should be the expected SOA
+        expected_soa = create_soa(2011112001)
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[0]))
+        self.assertTrue(rrsets_equal(expected_soa, actual_records[-1]))
+
+        # The ordering of the intermediate RRs can differ depending on the
+        # internal details of the SQLite3 library, so we sort them by a simple
+        # rule sufficient for the purpose here, and then compare them.
+        expected_others = [create_ns(self.ns_name),
+                           create_a(Name(self.ns_name), '192.0.2.1', 3600),
+                           create_a(Name(self.ns_name), '192.0.2.2', 7200)]
+        keyfn = lambda x: (x.get_type(), x.get_ttl())
+        for (expected_rr, actual_rr) in zip(sorted(expected_others, key=keyfn),
+                                            sorted(actual_records[1:4],
+                                                   key=keyfn)):
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
 
     def test_axfr_normal_session(self):
         XfroutSession._handle(self.xfrsess)
@@ -945,7 +1100,7 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
                             create_soa(2011112001)]
         self.assertEqual(len(expected_records), len(actual_records))
         for (expected_rr, actual_rr) in zip(expected_records, actual_records):
-            self.assertTrue(expected_rr, actual_rr)
+            self.assertTrue(rrsets_equal(expected_rr, actual_rr))
 
     def test_ixfr_soa_only(self):
         # The requested SOA serial is the latest one.  The response should
@@ -956,7 +1111,8 @@ class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
         response = self.sock.read_msg(Message.PRESERVE_ORDER);
         answers = response.get_section(Message.SECTION_ANSWER)
         self.assertEqual(1, len(answers))
-        self.assertTrue(create_soa(SOA_CURRENT_VERSION), answers[0])
+        self.assertTrue(rrsets_equal(create_soa(SOA_CURRENT_VERSION),
+                                     answers[0]))
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index d450138..310a0aa 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -66,6 +66,11 @@ class XfroutConfigError(Exception):
     """
     pass
 
+class XfroutSessionError(Exception):
+    '''An exception raised for some unexpected events during an xfrout session.
+    '''
+    pass
+
 def init_paths():
     global SPECFILE_PATH
     global AUTH_SPECFILE_PATH
@@ -93,7 +98,8 @@ init_paths()
 SPECFILE_LOCATION = SPECFILE_PATH + "/xfrout.spec"
 AUTH_SPECFILE_LOCATION = AUTH_SPECFILE_PATH + os.sep + "auth.spec"
 VERBOSE_MODE = False
-XFROUT_MAX_MESSAGE_SIZE = 65535
+XFROUT_DNS_HEADER_SIZE = 12     # protocol constant
+XFROUT_MAX_MESSAGE_SIZE = 65535 # ditto
 
 # borrowed from xfrin.py @ #1298.  We should eventually unify it.
 def format_zone_str(zone_name, zone_class):
@@ -534,32 +540,44 @@ class XfroutSession():
 
     def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa,
                                     message_upper_len):
-        '''Add the SOA record to the end of message. If it can't be
-        added, a new message should be created to send out the last soa .
+        '''Add the SOA record to the end of the message.
+
+        If it would exceed the maximum allowable size of a message, a new
+        message will be created to send out the last SOA.
+
+        We assume a message with a single SOA can always fit in the buffer
+        with or without TSIG.  In theory this could be wrong if the TSIG is
+        absurdly large, but in practice this assumption should be reasonable.
         '''
-        if (message_upper_len + self._tsig_len + get_rrset_len(rrset_soa) >=
-            XFROUT_MAX_MESSAGE_SIZE):
+        if message_upper_len + get_rrset_len(rrset_soa) > \
+                XFROUT_MAX_MESSAGE_SIZE:
             self._send_message(sock_fd, msg, self._tsig_ctx)
             msg = self._clear_message(msg)
 
-        # If tsig context exist, sign the last packet
         msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
     def _reply_xfrout_query(self, msg, sock_fd):
-        #TODO, there should be a better way to insert rrset.
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
+        # Reserve space for the fixed header size, the size of the question
+        # section, and the TSIG size (when included).  The size of the
+        # question section is the sum of the qname length and the size of
+        # the fixed-length fields (type and class, 2 bytes each).
+        message_upper_len = XFROUT_DNS_HEADER_SIZE + \
+            msg.get_question()[0].get_name().get_length() + 4 + \
+            self._tsig_len
 
         # If the iterator is None, we are responding to IXFR with a single
         # SOA RR.
         if self._iterator is None:
-            self._send_message_with_last_soa(msg, sock_fd, self._soa, 0)
+            self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                             message_upper_len)
             return
 
         # Add the beginning SOA
         msg.add_rrset(Message.SECTION_ANSWER, self._soa)
-        message_upper_len = get_rrset_len(self._soa) + self._tsig_len
+        message_upper_len += get_rrset_len(self._soa)
 
         # Add the rest of the zone/diff contents
         for rrset in self._iterator:
@@ -577,20 +595,33 @@ class XfroutSession():
             # size without compression) and use that to see if we
             # may have reached the limit
             rrset_len = get_rrset_len(rrset)
-            if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
+
+            if message_upper_len + rrset_len <= XFROUT_MAX_MESSAGE_SIZE:
                 msg.add_rrset(Message.SECTION_ANSWER, rrset)
                 message_upper_len += rrset_len
                 continue
 
+            # The RR would not fit.  If there are other RRs in the buffer,
+            # send them now and leave this RR for the next message.
             self._send_message(sock_fd, msg, self._tsig_ctx)
 
+            # Create a new message and reserve space for the carried-over
+            # RR (and for TSIG, in case the message is to be signed).
             msg = self._clear_message(msg)
+            message_upper_len = XFROUT_DNS_HEADER_SIZE + rrset_len + \
+                self._tsig_len
+
+            # If this RR overflows the buffer all by itself, fail.  In theory
+            # some RRs might fit in a TCP message when compressed even if they
+            # do not fit when uncompressed, but surely we don't want to send
+            # such monstrosities to an unsuspecting slave.
+            if message_upper_len > XFROUT_MAX_MESSAGE_SIZE:
+                raise XfroutSessionError('RR too large for zone transfer (' +
+                                         str(rrset_len) + ' bytes)')
+
             # Add the RRset to the new message
             msg.add_rrset(Message.SECTION_ANSWER, rrset)
 
-            # Reserve tsig space for signed packet
-            message_upper_len = rrset_len + self._tsig_len
-
         # Add and send the trailing SOA
         self._send_message_with_last_soa(msg, sock_fd, self._soa,
                                          message_upper_len)
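As a concrete check of the space reservation above (an illustration only; the
sizes come from the comments in this patch and the accompanying xfrout tests),
the initial upper-bound estimate for a TSIG-signed response to a query for
'example.com.' works out as follows:

    header_len = 12               # XFROUT_DNS_HEADER_SIZE
    qname_len = 13                # wire length of 'example.com.'
    question_len = qname_len + 4  # plus TYPE (2 bytes) and CLASS (2 bytes)
    tsig_len = 81                 # TSIG RR size assumed in the tests
    message_upper_len = header_len + question_len + tsig_len   # == 110

Each RRset that is added then increases message_upper_len by its estimated
(uncompressed) wire length, and a new message is started once the next RRset
would push the total past the 65535-byte maximum.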
@@ -782,7 +813,6 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             os.unlink(self._sock_file)
         except Exception as e:
             logger.error(XFROUT_REMOVE_UNIX_SOCKET_FILE_ERROR, self._sock_file, str(e))
-            pass
 
     def update_config_data(self, new_config):
         '''Apply the new config setting of xfrout module.
diff --git a/src/lib/asiolink/Makefile.am b/src/lib/asiolink/Makefile.am
index 5444547..07c3e13 100644
--- a/src/lib/asiolink/Makefile.am
+++ b/src/lib/asiolink/Makefile.am
@@ -14,6 +14,9 @@ CLEANFILES = *.gcno *.gcda
 # with -Werror (our default setting).
 
 lib_LTLIBRARIES = libasiolink.la
+
+libasiolink_la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libasiolink_la_SOURCES  = asiolink.h
 libasiolink_la_SOURCES += dummy_io_cb.h
 libasiolink_la_SOURCES += interval_timer.cc interval_timer.h
diff --git a/src/lib/cryptolink/Makefile.am b/src/lib/cryptolink/Makefile.am
index 93f3443..fc12fae 100644
--- a/src/lib/cryptolink/Makefile.am
+++ b/src/lib/cryptolink/Makefile.am
@@ -11,4 +11,5 @@ lib_LTLIBRARIES = libcryptolink.la
 libcryptolink_la_SOURCES = cryptolink.h cryptolink.cc
 libcryptolink_la_SOURCES += crypto_hmac.h crypto_hmac.cc
 
-libcryptolink_la_LIBADD = ${BOTAN_LDFLAGS} ${BOTAN_RPATH}
+libcryptolink_la_LDFLAGS = ${BOTAN_LDFLAGS}
+libcryptolink_la_LIBADD = ${BOTAN_LIBS} ${BOTAN_RPATH}
diff --git a/src/lib/cryptolink/tests/Makefile.am b/src/lib/cryptolink/tests/Makefile.am
index fbdd13f..6ac6fdf 100644
--- a/src/lib/cryptolink/tests/Makefile.am
+++ b/src/lib/cryptolink/tests/Makefile.am
@@ -16,8 +16,8 @@ TESTS += run_unittests
 run_unittests_SOURCES = run_unittests.cc
 run_unittests_SOURCES += crypto_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-run_unittests_LDFLAGS = ${BOTAN_LDFLAGS} $(AM_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(GTEST_LDADD) $(BOTAN_LIBS)
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/cryptolink/libcryptolink.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/datasrc/Makefile.am b/src/lib/datasrc/Makefile.am
index bf1171e..b6c314c 100644
--- a/src/lib/datasrc/Makefile.am
+++ b/src/lib/datasrc/Makefile.am
@@ -7,9 +7,15 @@ AM_CPPFLAGS += $(SQLITE_CFLAGS)
 
 AM_CXXFLAGS = $(B10_CXXFLAGS)
 
+pkglibexecdir = $(libexecdir)/@PACKAGE@/backends
+
+datasrc_config.h: datasrc_config.h.pre
+	$(SED) -e "s|@@PKGLIBEXECDIR@@|$(pkglibexecdir)|" datasrc_config.h.pre >$@
+
 CLEANFILES = *.gcno *.gcda datasrc_messages.h datasrc_messages.cc
+CLEANFILES += datasrc_config.h
 
-lib_LTLIBRARIES = libdatasrc.la sqlite3_ds.la memory_ds.la
+lib_LTLIBRARIES = libdatasrc.la
 libdatasrc_la_SOURCES = data_source.h data_source.cc
 libdatasrc_la_SOURCES += static_datasrc.h static_datasrc.cc
 libdatasrc_la_SOURCES += sqlite3_datasrc.h sqlite3_datasrc.cc
@@ -25,8 +31,11 @@ libdatasrc_la_SOURCES += database.h database.cc
 libdatasrc_la_SOURCES += factory.h factory.cc
 nodist_libdatasrc_la_SOURCES = datasrc_messages.h datasrc_messages.cc
 
+pkglibexec_LTLIBRARIES =  sqlite3_ds.la memory_ds.la
+
 sqlite3_ds_la_SOURCES = sqlite3_accessor.h sqlite3_accessor.cc
 sqlite3_ds_la_LDFLAGS = -module
+sqlite3_ds_la_LDFLAGS += -no-undefined -version-info 1:0:0
 sqlite3_ds_la_LIBADD = $(top_builddir)/src/lib/exceptions/libexceptions.la
 sqlite3_ds_la_LIBADD += libdatasrc.la
 sqlite3_ds_la_LIBADD += $(SQLITE_LIBS)
@@ -42,7 +51,7 @@ libdatasrc_la_LIBADD += $(top_builddir)/src/lib/log/liblog.la
 libdatasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 libdatasrc_la_LIBADD += $(SQLITE_LIBS)
 
-BUILT_SOURCES = datasrc_messages.h datasrc_messages.cc
+BUILT_SOURCES = datasrc_config.h datasrc_messages.h datasrc_messages.cc
 datasrc_messages.h datasrc_messages.cc: Makefile datasrc_messages.mes
 	$(top_builddir)/src/lib/log/compiler/message $(top_srcdir)/src/lib/datasrc/datasrc_messages.mes
 
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index 053d4bc..45ce0c2 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -843,7 +843,7 @@ public:
         committed_(false), accessor_(accessor), zone_id_(zone_id),
         db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
         zone_class_(zone_class), journaling_(journaling),
-        diff_phase_(NOT_STARTED),
+        diff_phase_(NOT_STARTED), serial_(0),
         finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
     {
         logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -896,7 +896,7 @@ private:
         ADD
     };
     DiffPhase diff_phase_;
-    uint32_t serial_;
+    Serial serial_;
     boost::scoped_ptr<DatabaseClient::Finder> finder_;
 
     // This is a set of validation checks commonly used for addRRset() and
@@ -985,8 +985,8 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
         columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_ADD,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_ADD, journal);
         }
         accessor_->addRecordToZone(columns);
     }
@@ -1023,8 +1023,8 @@ DatabaseUpdater::deleteRRset(const RRset& rrset) {
         params[Accessor::DEL_RDATA] = it->getCurrent().toText();
         if (journaling_) {
             journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
-            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_DELETE,
-                                     journal);
+            accessor_->addRecordDiff(zone_id_, serial_.getValue(),
+                                     Accessor::DIFF_DELETE, journal);
         }
         accessor_->deleteRecordInZone(params);
     }
diff --git a/src/lib/datasrc/datasrc_config.h.pre.in b/src/lib/datasrc/datasrc_config.h.pre.in
new file mode 100644
index 0000000..ff99601
--- /dev/null
+++ b/src/lib/datasrc/datasrc_config.h.pre.in
@@ -0,0 +1,31 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+#ifndef __DATASRC_CONFIG_H
+#define __DATASRC_CONFIG_H 1
+
+namespace isc {
+namespace datasrc {
+
+/// \brief Default directory to find the loadable data source libraries
+///
+/// This is the directory where, once installed, loadable backend libraries
+/// such as memory_ds.so and sqlite3_ds.so are found. It is used by the
+/// DataSourceClient loader if no absolute path is used and
+/// B10_FROM_BUILD is not set in the environment.
+const char* const BACKEND_LIBRARY_PATH = "@@PKGLIBEXECDIR@@/";
+
+} // end namespace datasrc
+} // end namespace isc
+
+#endif // __DATASRC_CONFIG_H
diff --git a/src/lib/datasrc/factory.cc b/src/lib/datasrc/factory.cc
index 1818c70..35a79fe 100644
--- a/src/lib/datasrc/factory.cc
+++ b/src/lib/datasrc/factory.cc
@@ -19,13 +19,59 @@
 #include "sqlite3_accessor.h"
 #include "memory_datasrc.h"
 
+#include "datasrc_config.h"
+
 #include <datasrc/logger.h>
 
 #include <dlfcn.h>
+#include <cstdlib>
 
+using namespace std;
 using namespace isc::data;
 using namespace isc::datasrc;
 
+namespace {
+// This helper function takes the 'type' string as passed to
+// the DataSourceClient container below, and, unless it
+// already specifies a specific loadable .so file, will
+// convert the short name to the full file name.
+// I.e. it will add '_ds.so' (if necessary), and prepend
+// it with an absolute path (if necessary).
+// Returns the resulting string to use with LibraryContainer.
+const std::string
+getDataSourceLibFile(const std::string& type) {
+    if (type.empty()) {
+        isc_throw(DataSourceLibraryError,
+                  "DataSourceClient container called with empty type value");
+    }
+    if (type == ".so") {
+        isc_throw(DataSourceLibraryError, "DataSourceClient container called "
+                                          "with bad type or file name");
+    }
+
+    // Type can be either a short name, in which case we need to
+    // append "_ds.so", or it can be a direct .so library.
+    std::string lib_file = type;
+    const std::string::size_type ext_pos = lib_file.rfind(".so");
+    if (ext_pos == std::string::npos || ext_pos + 3 != lib_file.length()) {
+        lib_file.append("_ds.so");
+    }
+    // And if it is not an absolute path, prepend it with our
+    // loadable backend library path
+    if (type[0] != '/') {
+        // When running from the build tree, we do NOT want
+        // to load the installed loadable library
+        if (getenv("B10_FROM_BUILD") != NULL) {
+            lib_file = std::string(getenv("B10_FROM_BUILD")) +
+                       "/src/lib/datasrc/.libs/" + lib_file;
+        } else {
+            lib_file = isc::datasrc::BACKEND_LIBRARY_PATH + lib_file;
+        }
+    }
+    return (lib_file);
+}
+} // end anonymous namespace
+
 namespace isc {
 namespace datasrc {
 
@@ -34,7 +80,10 @@ LibraryContainer::LibraryContainer(const std::string& name) {
     // are recognized as such
     ds_lib_ = dlopen(name.c_str(), RTLD_NOW | RTLD_GLOBAL);
     if (ds_lib_ == NULL) {
-        isc_throw(DataSourceLibraryError, dlerror());
+        // This may cause the filename to appear twice in the actual
+        // error, but the output of dlerror is implementation-dependent
+        isc_throw(DataSourceLibraryError, "dlopen failed for " << name << 
+                                          ": " << dlerror());
     }
 }
 
@@ -61,7 +110,7 @@ LibraryContainer::getSym(const char* name) {
 
 DataSourceClientContainer::DataSourceClientContainer(const std::string& type,
                                                      ConstElementPtr config)
-: ds_lib_(type + "_ds.so")
+: ds_lib_(getDataSourceLibFile(type))
 {
     // We are casting from a data to a function pointer here
     // Some compilers (rightfully) complain about that, but
diff --git a/src/lib/datasrc/factory.h b/src/lib/datasrc/factory.h
index 0284067..9d0a762 100644
--- a/src/lib/datasrc/factory.h
+++ b/src/lib/datasrc/factory.h
@@ -68,7 +68,7 @@ public:
     ///             the library path.
     ///
     /// \exception DataSourceLibraryError If the library cannot be found or
-    ///            cannot be loaded.
+    ///            cannot be loaded, or if name is an empty string.
     LibraryContainer(const std::string& name);
 
     /// \brief Destructor
@@ -115,6 +115,15 @@ private:
 /// easy recognition and to reduce potential mistakes.
 /// For example, the sqlite3 implementation has the type 'sqlite3', and the
 /// derived filename 'sqlite3_ds.so'
+/// The value of type can be a specific loadable library; if it already ends
+/// with '.so', the loader will not add '_ds.so'.
+/// It may also be an absolute path; if it starts with '/', nothing is
+/// prepended. If it does not, the loadable library will be taken from the
+/// installation directory, see the value of
+/// isc::datasrc::BACKEND_LIBRARY_PATH in datasrc_config.h for the exact path.
+///
+/// \note When 'B10_FROM_BUILD' is set in the environment, the build
+///       directory is used instead of the install directory.
 ///
 /// There are of course some demands to an implementation, not all of which
 /// can be verified compile-time. It must provide a creator and destructor
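The name-to-file mapping described above can be illustrated with a short, hedged sketch (not part of this patch; it assumes the sqlite3 backend is installed and that an empty configuration is acceptable to it):

    // "sqlite3"            -> BACKEND_LIBRARY_PATH + "sqlite3_ds.so"
    // "custom_ds.so"       -> BACKEND_LIBRARY_PATH + "custom_ds.so"   (already ends in .so)
    // "/opt/lib/my_ds.so"  -> "/opt/lib/my_ds.so"                     (absolute, used as-is)
    isc::datasrc::DataSourceClientContainer client("sqlite3",
                                                   isc::data::ElementPtr());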
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 70f2999..6dd6b0a 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -55,13 +55,6 @@ run_unittests_SOURCES += test_datasrc.h test_datasrc.cc
 run_unittests_SOURCES += rbtree_unittest.cc
 run_unittests_SOURCES += logger_unittest.cc
 run_unittests_SOURCES += client_unittest.cc
-if !USE_STATIC_LINK
-# This test uses dynamically loadable module.  It will cause various
-# troubles with static link such as "missing" symbols in the static object
-# for the module.  As a workaround we disable this particualr test
-# in this case.
-run_unittests_SOURCES += factory_unittest.cc
-endif
 
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
 run_unittests_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
@@ -110,3 +103,21 @@ EXTRA_DIST += testdata/test.sqlite3
 EXTRA_DIST += testdata/test.sqlite3.nodiffs
 EXTRA_DIST += testdata/rwtest.sqlite3
 EXTRA_DIST += testdata/diffs.sqlite3
+
+# For the factory unit tests, we need to specify that we want
+# the loadable backend libraries from the build tree, and not from 
+# the installation directory. Therefore we build it into a separate
+# binary, and call that from check-local with B10_FROM_BUILD set.
+# Also, we only want to do this when static building is not used,
+# since it will cause various troubles with static link such as
+# "missing" symbols in the static object for the module.
+if !USE_STATIC_LINK
+noinst_PROGRAMS+=run_unittests_factory
+run_unittests_factory_SOURCES = $(common_sources)
+run_unittests_factory_SOURCES += factory_unittest.cc
+run_unittests_factory_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_factory_LDFLAGS  = $(AM_LDFLAGS)  $(GTEST_LDFLAGS)
+run_unittests_factory_LDADD = $(common_ldadd)
+check-local:
+	B10_FROM_BUILD=${abs_top_builddir} ./run_unittests_factory
+endif
diff --git a/src/lib/datasrc/tests/factory_unittest.cc b/src/lib/datasrc/tests/factory_unittest.cc
index 0133508..e98f9bc 100644
--- a/src/lib/datasrc/tests/factory_unittest.cc
+++ b/src/lib/datasrc/tests/factory_unittest.cc
@@ -14,6 +14,7 @@
 
 #include <boost/scoped_ptr.hpp>
 
+#include <datasrc/datasrc_config.h>
 #include <datasrc/factory.h>
 #include <datasrc/data_source.h>
 #include <datasrc/sqlite3_accessor.h>
@@ -30,6 +31,70 @@ std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
 
 namespace {
 
+// Note: this helper only checks the error that is received, up to the length
+// of the expected string. It will always pass if you give it an empty
+// expected_error (provided that some error is raised at all).
+void
+pathtestHelper(const std::string& file, const std::string& expected_error) {
+    std::string error;
+    try {
+        DataSourceClientContainer(file, ElementPtr());
+    } catch (const DataSourceLibraryError& dsle) {
+        error = dsle.what();
+    }
+    ASSERT_LT(expected_error.size(), error.size());
+    EXPECT_EQ(expected_error, error.substr(0, expected_error.size()));
+}
+
+TEST(FactoryTest, paths) {
+    // Test whether the paths are made absolute if they are not,
+    // by inspecting the error that is raised when they are wrong
+    const std::string error("dlopen failed for ");
+    // With the current implementation, we can safely assume this has
+    // been set for this test (as the loader would otherwise also fail
+    // unless the loadable backend library happens to be installed)
+    const std::string builddir(getenv("B10_FROM_BUILD"));
+
+    // Absolute and ending with .so should have no change
+    pathtestHelper("/no_such_file.so", error + "/no_such_file.so");
+
+    // If not ending in .so, it should get _ds.so appended
+    pathtestHelper("/no_such_file", error + "/no_such_file_ds.so");
+
+    // If not starting with /, path should be added. For this test that
+    // means the build directory as set in B10_FROM_BUILD
+    pathtestHelper("no_such_file.so", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so");
+    pathtestHelper("no_such_file", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file_ds.so");
+
+    // Some tests with '.so' in the name itself
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something", error +
+                   "/no_such_file.so.something_ds.so");
+    pathtestHelper("/no_such_file.so.something.so", error +
+                   "/no_such_file.so.something.so");
+    pathtestHelper("/no_such_file.so.so", error +
+                   "/no_such_file.so.so");
+    pathtestHelper("no_such_file.so.something", error + builddir +
+                   "/src/lib/datasrc/.libs/no_such_file.so.something_ds.so");
+
+    // Temporarily unset B10_FROM_BUILD to see that BACKEND_LIBRARY_PATH
+    // is used
+    unsetenv("B10_FROM_BUILD");
+    pathtestHelper("no_such_file.so", error + BACKEND_LIBRARY_PATH +
+                   "no_such_file.so");
+    // Put it back just in case
+    setenv("B10_FROM_BUILD", builddir.c_str(), 1);
+
+    // Test some bad input values
+    ASSERT_THROW(DataSourceClientContainer("", ElementPtr()),
+                 DataSourceLibraryError);
+    ASSERT_THROW(DataSourceClientContainer(".so", ElementPtr()),
+                 DataSourceLibraryError);
+}
+
 TEST(FactoryTest, sqlite3ClientBadConfig) {
     // We start out by building the configuration data bit by bit,
     // testing each form of 'bad config', until we have a good one.
diff --git a/src/lib/dhcp/Makefile.am b/src/lib/dhcp/Makefile.am
index 64dda17..3991033 100644
--- a/src/lib/dhcp/Makefile.am
+++ b/src/lib/dhcp/Makefile.am
@@ -14,6 +14,7 @@ libdhcp_la_SOURCES += option.cc option.h
 libdhcp_la_SOURCES += option6_ia.cc option6_ia.h
 libdhcp_la_SOURCES += option6_iaaddr.cc option6_iaaddr.h
 libdhcp_la_SOURCES += option6_addrlst.cc option6_addrlst.h
+libdhcp_la_SOURCES += option4_addrlst.cc option4_addrlst.h
 libdhcp_la_SOURCES += dhcp6.h dhcp4.h
 libdhcp_la_SOURCES += pkt6.cc pkt6.h
 libdhcp_la_SOURCES += pkt4.cc pkt4.h
diff --git a/src/lib/dhcp/libdhcp.cc b/src/lib/dhcp/libdhcp.cc
index b95a427..f84e495 100644
--- a/src/lib/dhcp/libdhcp.cc
+++ b/src/lib/dhcp/libdhcp.cc
@@ -17,6 +17,7 @@
 #include <util/buffer.h>
 #include <dhcp/libdhcp.h>
 #include "config.h"
+#include <dhcp/dhcp4.h>
 #include <dhcp/dhcp6.h>
 #include <dhcp/option.h>
 #include <dhcp/option6_ia.h>
@@ -90,8 +91,17 @@ LibDHCP::unpackOptions4(const std::vector<uint8_t>& buf,
     size_t offset = 0;
 
     // 2 - header of DHCPv4 option
-    while (offset + 2 <= buf.size()) {
+    while (offset + 1 <= buf.size()) {
         uint8_t opt_type = buf[offset++];
+        if (offset == buf.size()) {
+            // The type byte was the last byte in the buffer: this is either
+            // the END marker (we are done) or a truncated option.
+            if (opt_type == DHO_END) {
+                return; // just return; no need to create a DHO_END option
+            } else {
+                isc_throw(OutOfRange, "Attempt to parse truncated option "
+                          << opt_type);
+            }
+        }
+
         uint8_t opt_len =  buf[offset++];
         if (offset + opt_len > buf.size() ) {
             isc_throw(OutOfRange, "Option parse failed. Tried to parse "
diff --git a/src/lib/dhcp/option.cc b/src/lib/dhcp/option.cc
index daef288..20dd97a 100644
--- a/src/lib/dhcp/option.cc
+++ b/src/lib/dhcp/option.cc
@@ -128,23 +128,6 @@ Option::pack4(isc::util::OutputBuffer& buf) {
 }
 
 unsigned int
-Option::pack4(boost::shared_array<uint8_t>& buf,
-             unsigned int buf_len,
-             unsigned int offset) {
-    if (offset + len() > buf_len) {
-        isc_throw(OutOfRange, "Failed to pack v4 option=" <<
-                  type_ << ",len=" << len() << ": too small buffer.");
-    }
-    uint8_t *ptr = &buf[offset];
-    ptr[0] = type_;
-    ptr[1] = len() - getHeaderLen();
-    ptr += 2;
-    memcpy(ptr, &data_[0], data_.size());
-
-    return offset + len();
-}
-
-unsigned int
 Option::pack6(boost::shared_array<uint8_t>& buf,
              unsigned int buf_len,
              unsigned int offset) {
@@ -220,7 +203,7 @@ Option::unpack6(const boost::shared_array<uint8_t>& buf,
 
 /// Returns length of the complete option (data length + DHCPv4/DHCPv6
 /// option header)
-unsigned short
+uint16_t
 Option::len() {
 
     // length of the whole option is header and data stored in this option...
@@ -295,17 +278,7 @@ std::string Option::toText(int indent /* =0 */ ) {
     return tmp.str();
 }
 
-unsigned short
-Option::getType() {
-    return type_;
-}
-
-const std::vector<uint8_t>&
-Option::getData() {
-    return (data_);
-}
-
-unsigned short
+uint16_t
 Option::getHeaderLen() {
     switch (universe_) {
     case V4:
diff --git a/src/lib/dhcp/option.h b/src/lib/dhcp/option.h
index 3822cf0..088d094 100644
--- a/src/lib/dhcp/option.h
+++ b/src/lib/dhcp/option.h
@@ -178,20 +178,19 @@ public:
     /// Returns option type (0-255 for DHCPv4, 0-65535 for DHCPv6)
     ///
     /// @return option type
-    unsigned short
-    getType();
+    unsigned short getType() { return (type_); }
 
     /// Returns length of the complete option (data length + DHCPv4/DHCPv6
     /// option header)
     ///
     /// @return length of the option
-    virtual unsigned short
+    virtual uint16_t
     len();
 
     /// @brief Returns length of header (2 for v4, 4 for v6)
     ///
     /// @return length of option header
-    virtual unsigned short
+    virtual uint16_t
     getHeaderLen();
 
     /// returns if option is valid (e.g. option may be truncated)
@@ -202,9 +201,9 @@ public:
 
     /// Returns pointer to actual data.
     ///
-    /// @return pointer to actual data (or NULL if there is no data)
-    virtual const std::vector<uint8_t>&
-    getData();
+    /// @return pointer to actual data (or reference to an empty vector
+    ///         if there is no data)
+    virtual const std::vector<uint8_t>& getData() { return (data_); }
 
     /// Adds a sub-option.
     ///
@@ -242,20 +241,6 @@ public:
     ~Option();
 
 protected:
-
-    /// Builds raw (over-wire) buffer of this option, including all
-    /// defined suboptions. Version for building DHCPv4 options.
-    ///
-    /// @param buf output buffer (built options will be stored here)
-    /// @param buf_len buffer length (used for buffer overflow checks)
-    /// @param offset offset from start of the buf buffer
-    ///
-    /// @return offset to the next byte after last used byte
-    virtual unsigned int
-    pack4(boost::shared_array<uint8_t>& buf,
-          unsigned int buf_len,
-          unsigned int offset);
-
     /// Builds raw (over-wire) buffer of this option, including all
     /// defined suboptions. Version for building DHCPv4 options.
     ///
diff --git a/src/lib/dhcp/option4_addrlst.cc b/src/lib/dhcp/option4_addrlst.cc
new file mode 100644
index 0000000..88eb915
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.cc
@@ -0,0 +1,135 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <string.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+#include <sstream>
+#include <iomanip>
+#include <exceptions/exceptions.h>
+#include <asiolink/io_address.h>
+#include <util/io_utilities.h>
+#include <dhcp/option4_addrlst.h>
+
+using namespace std;
+using namespace isc::dhcp;
+using namespace isc::util;
+using namespace isc::asiolink;
+
+Option4AddrLst::Option4AddrLst(uint8_t type)
+    :Option(V4, type) {
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const AddressContainer& addrs)
+    :Option(V4, type) {
+    setAddresses(addrs);
+    // don't set addrs_ directly. setAddresses() will do additional checks.
+}
+
+
+Option4AddrLst::Option4AddrLst(uint8_t type,
+                               vector<uint8_t>::const_iterator first,
+                               vector<uint8_t>::const_iterator last)
+    :Option(V4, type) {
+    if ( (distance(first, last) % V4ADDRESS_LEN) ) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_
+                  << " has invalid length=" << distance(first, last)
+                  << ", must be divisible by 4.");
+    }
+
+    while (first != last) {
+        const uint8_t* ptr = &(*first);
+        addAddress(IOAddress(readUint32(ptr)));
+        first += V4ADDRESS_LEN;
+    }
+}
+
+Option4AddrLst::Option4AddrLst(uint8_t type, const IOAddress& addr)
+    :Option(V4, type) {
+    setAddress(addr);
+}
+
+void
+Option4AddrLst::pack4(isc::util::OutputBuffer& buf) {
+
+    if (addrs_.size() * V4ADDRESS_LEN > 255) {
+        isc_throw(OutOfRange, "DHCPv4 Option4AddrLst " << type_ << " is too big. "
+                  << "At most 255 bytes are supported.");
+        /// TODO Larger options can be stored as separate instances
+        /// of DHCPv4 options. Clients MUST concatenate them.
+        /// Fortunately, there are no such large options used today.
+    }
+
+    buf.writeUint8(type_);
+    buf.writeUint8(len() - getHeaderLen());
+
+    AddressContainer::const_iterator addr = addrs_.begin();
+
+    while (addr != addrs_.end()) {
+        buf.writeUint32(*addr);
+        ++addr;
+    }
+}
+
+void Option4AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.clear();
+    addAddress(addr);
+}
+
+void Option4AddrLst::setAddresses(const AddressContainer& addrs) {
+
+    // Do not copy it as a whole. addAddress() does sanity checks.
+    // i.e. throw if someone tries to set IPv6 address.
+    addrs_.clear();
+    for (AddressContainer::const_iterator addr = addrs.begin();
+         addr != addrs.end(); ++addr) {
+        addAddress(*addr);
+    }
+}
+
+
+void Option4AddrLst::addAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET) {
+        isc_throw(BadValue, "Can't store non-IPv4 address in "
+                  << "Option4AddrLst option");
+    }
+    addrs_.push_back(addr);
+}
+
+uint16_t Option4AddrLst::len() {
+
+    // Returns length of the complete option (option header + data length)
+    return (getHeaderLen() + addrs_.size() * V4ADDRESS_LEN);
+}
+
+std::string Option4AddrLst::toText(int indent /* =0 */ ) {
+    std::stringstream tmp;
+
+    for (int i = 0; i < indent; i++) {
+        tmp << " ";
+    }
+
+    tmp << "type=" << type_ << ", len=" << len()-getHeaderLen() << ":";
+
+    for (AddressContainer::const_iterator addr = addrs_.begin();
+         addr != addrs_.end(); ++addr) {
+        tmp << " " << (*addr);
+    }
+
+    return tmp.str();
+}
diff --git a/src/lib/dhcp/option4_addrlst.h b/src/lib/dhcp/option4_addrlst.h
new file mode 100644
index 0000000..c795805
--- /dev/null
+++ b/src/lib/dhcp/option4_addrlst.h
@@ -0,0 +1,167 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef OPTION4_ADDRLST_H_
+#define OPTION4_ADDRLST_H_
+
+#include <string>
+#include <map>
+#include <vector>
+#include <boost/shared_ptr.hpp>
+#include <boost/shared_array.hpp>
+#include <util/buffer.h>
+#include <dhcp/option.h>
+
+namespace isc {
+namespace dhcp {
+
+
+/// @brief DHCPv4 Option class for handling list of IPv4 addresses.
+///
+/// This class handles a list of IPv4 addresses. An example of such an option
+/// is the dns-servers option. It can also be used to handle a single address.
+class Option4AddrLst : public isc::dhcp::Option {
+public:
+
+    /// Defines a collection of IPv4 addresses.
+    typedef std::vector<isc::asiolink::IOAddress> AddressContainer;
+
+    /// @brief Constructor, creates an option with empty list of addresses.
+    ///
+    /// Creates empty option that can hold addresses. Addresses can be added
+    /// with addAddress(), setAddress() or setAddresses().
+    ///
+    /// @param type option type
+    Option4AddrLst(uint8_t type);
+
+    /// @brief Constructor, creates an option with a list of addresses.
+    ///
+    /// Creates an option that contains specified list of IPv4 addresses.
+    ///
+    /// @param type option type
+    /// @param addrs container with a list of addresses
+    Option4AddrLst(uint8_t type, const AddressContainer& addrs);
+
+    /// @brief Constructor, creates an option with a single address.
+    ///
+    /// Creates an option that contains a single address.
+    ///
+    /// @param type option type
+    /// @param addr a single address that will be stored as 1-elem. address list
+    Option4AddrLst(uint8_t type, const isc::asiolink::IOAddress& addr);
+
+    /// @brief Constructor, used for received options.
+    ///
+    /// TODO: This can be templated to use different containers, not just
+    /// vector. Prototype should look like this:
+    /// template<typename InputIterator> Option(Universe u, uint16_t type,
+    /// InputIterator first, InputIterator last);
+    ///
+    /// vector<int8_t> myData;
+    /// Example usage: new Option(V4, 123, myData.begin()+1, myData.end()-1)
+    /// This will create DHCPv4 option of type 123 that contains data from
+    /// trimmed (first and last byte removed) myData vector.
+    ///
+    /// @param type option type (0-255 for V4 and 0-65535 for V6)
+    /// @param first iterator to the first element that should be copied
+    /// @param last iterator to the next element after the last one
+    ///        to be copied.
+    Option4AddrLst(uint8_t type, std::vector<uint8_t>::const_iterator first,
+           std::vector<uint8_t>::const_iterator last);
+
+    /// @brief Writes option in a wire-format to a buffer.
+    ///
+    /// Method will throw if option storing fails for some reason.
+    ///
+    /// TODO Once old (DHCPv6) implementation is rewritten,
+    /// unify pack4() and pack6() and rename them to just pack().
+    ///
+    /// @param buf output buffer (option will be stored there)
+    virtual void
+    pack4(isc::util::OutputBuffer& buf);
+
+    /// Returns string representation of the option.
+    ///
+    /// @param indent number of spaces before printing text
+    ///
+    /// @return string with text representation.
+    virtual std::string
+    toText(int indent = 0);
+
+    /// Returns length of the complete option (data length + DHCPv4/DHCPv6
+    /// option header)
+    ///
+    /// @return length of the option
+    virtual uint16_t len();
+
+    /// @brief Returns vector with addresses.
+    ///
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
+    ///
+    /// @return address container with addresses
+    AddressContainer
+    getAddresses() { return addrs_; };
+
+    /// @brief Sets the address list.
+    ///
+    /// Clears the existing list of addresses and copies all addresses from
+    /// the given container. See addAddress() if you want to add an address
+    /// to the existing list, or setAddress() if you want to set a single
+    /// address.
+    ///
+    /// All passed addresses must be IPv4 addresses. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addrs address collection to be set
+    void setAddresses(const AddressContainer& addrs);
+
+    /// @brief Clears address list and sets a single address.
+    ///
+    /// Clears the existing list of addresses and adds a single address to
+    /// that list. This is a convenient method for options that are supposed
+    /// to contain only a single address. See addAddress() if you want to add
+    /// an address to the existing list, or setAddresses() if you want to set
+    /// the whole list at once.
+    ///
+    /// The passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be set as 1-element address list
+    void setAddress(const isc::asiolink::IOAddress& addr);
+
+    /// @brief Adds address to existing list of addresses.
+    ///
+    /// Adds a single address to that list. See setAddress() if you want to
+    /// define only a single address or setAddresses() if you want to
+    /// set the whole list at once.
+    ///
+    /// The passed address must be an IPv4 address. Otherwise a BadValue
+    /// exception will be thrown.
+    ///
+    /// @param addr an address that is going to be added to the existing list
+    void addAddress(const isc::asiolink::IOAddress& addr);
+
+protected:
+    /// contains list of addresses
+    AddressContainer addrs_;
+};
+
+} // namespace isc::dhcp
+} // namespace isc
+
+#endif
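As a quick usage illustration (mirroring the unit tests added further down in this patch), a minimal sketch of building and packing the new option:

    // A dns-servers option carrying one IPv4 address.
    isc::dhcp::Option4AddrLst dns_servers(DHO_DOMAIN_NAME_SERVERS,
                                          isc::asiolink::IOAddress("192.0.2.3"));
    isc::util::OutputBuffer buf(16);
    dns_servers.pack4(buf);   // writes: type, length, then 4 bytes per address
    // dns_servers.len() == 6: getHeaderLen() (2) + 1 * V4ADDRESS_LEN (4)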
diff --git a/src/lib/dhcp/option6_addrlst.cc b/src/lib/dhcp/option6_addrlst.cc
index fc981fa..9be3810 100644
--- a/src/lib/dhcp/option6_addrlst.cc
+++ b/src/lib/dhcp/option6_addrlst.cc
@@ -50,6 +50,10 @@ Option6AddrLst::Option6AddrLst(unsigned short type,
 
 void
 Option6AddrLst::setAddress(const isc::asiolink::IOAddress& addr) {
+    if (addr.getFamily() != AF_INET6) {
+        isc_throw(BadValue, "Can't store non-IPv6 address in Option6AddrLst option");
+    }
+
     addrs_.clear();
     addrs_.push_back(addr);
 }
@@ -128,7 +132,7 @@ std::string Option6AddrLst::toText(int indent /* =0 */) {
     return tmp.str();
 }
 
-unsigned short Option6AddrLst::len() {
+uint16_t Option6AddrLst::len() {
 
     return (OPTION6_HDR_LEN + addrs_.size()*16);
 }
diff --git a/src/lib/dhcp/option6_addrlst.h b/src/lib/dhcp/option6_addrlst.h
index c5b32af..a73dc55 100644
--- a/src/lib/dhcp/option6_addrlst.h
+++ b/src/lib/dhcp/option6_addrlst.h
@@ -16,17 +16,16 @@
 #define OPTION6_ADDRLST_H_
 
 #include <vector>
-#include "asiolink/io_address.h"
-#include "dhcp/option.h"
+#include <asiolink/io_address.h>
+#include <dhcp/option.h>
 
 namespace isc {
 namespace dhcp {
 
-/// @brief Option class for handling list of IPv6 addresses.
+/// @brief DHCPv6 Option class for handling list of IPv6 addresses.
 ///
 /// This class handles a list of IPv6 addresses. An example of such option
 /// is dns-servers option. It can also be used to handle single address.
-///
 class Option6AddrLst: public Option {
 
 public:
@@ -105,17 +104,17 @@ public:
 
     /// @brief Returns vector with addresses.
     ///
-    /// As user may want to use/modify this list, it is better to return
-    /// a copy rather than const reference to the original. This is
-    /// usually one or two addresses long, so it is not a big deal.
-    ///
-    /// @return vector with addresses
+    /// We return a copy of our list. Although this includes overhead,
+    /// it also makes this list safe to use after this option object
+    /// is no longer available. As options are expected to hold only
+    /// a couple (1-3) addresses, the overhead is not that big.
     ///
+    /// @return address container with addresses
     AddressContainer
     getAddresses() { return addrs_; };
 
     // returns data length (data length + DHCPv4/DHCPv6 option header)
-    virtual unsigned short len();
+    virtual uint16_t len();
 
 protected:
     AddressContainer addrs_;
diff --git a/src/lib/dhcp/option6_ia.cc b/src/lib/dhcp/option6_ia.cc
index 46daee1..209f500 100644
--- a/src/lib/dhcp/option6_ia.cc
+++ b/src/lib/dhcp/option6_ia.cc
@@ -77,7 +77,7 @@ Option6IA::unpack(const boost::shared_array<uint8_t>& buf,
     if ( parse_len < OPTION6_IA_LEN || offset + OPTION6_IA_LEN > buf_len) {
         isc_throw(OutOfRange, "Option " << type_ << " truncated");
     }
-    
+
     iaid_ = readUint32(&buf[offset]);
     offset += sizeof(uint32_t);
 
@@ -121,9 +121,9 @@ std::string Option6IA::toText(int indent /* = 0*/) {
     return tmp.str();
 }
 
-unsigned short Option6IA::len() {
+uint16_t Option6IA::len() {
 
-    unsigned short length = OPTION6_HDR_LEN /*header (4)*/ +
+    uint16_t length = OPTION6_HDR_LEN /*header (4)*/ +
         OPTION6_IA_LEN  /* option content (12) */;
 
     // length of all suboptions
diff --git a/src/lib/dhcp/option6_ia.h b/src/lib/dhcp/option6_ia.h
index 516b2fc..cab8068 100644
--- a/src/lib/dhcp/option6_ia.h
+++ b/src/lib/dhcp/option6_ia.h
@@ -116,7 +116,7 @@ public:
     /// Returns length of this option, including option header and suboptions
     ///
     /// @return length of this option
-    virtual unsigned short
+    virtual uint16_t
     len();
 
 protected:
diff --git a/src/lib/dhcp/option6_iaaddr.cc b/src/lib/dhcp/option6_iaaddr.cc
index 4177714..fd3bca4 100644
--- a/src/lib/dhcp/option6_iaaddr.cc
+++ b/src/lib/dhcp/option6_iaaddr.cc
@@ -116,9 +116,9 @@ std::string Option6IAAddr::toText(int indent /* =0 */) {
     return tmp.str();
 }
 
-unsigned short Option6IAAddr::len() {
+uint16_t Option6IAAddr::len() {
 
-    unsigned short length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
+    uint16_t length = OPTION6_HDR_LEN + OPTION6_IAADDR_LEN;
 
     // length of all suboptions
     // TODO implement:
diff --git a/src/lib/dhcp/option6_iaaddr.h b/src/lib/dhcp/option6_iaaddr.h
index 60c5c48..40e5967 100644
--- a/src/lib/dhcp/option6_iaaddr.h
+++ b/src/lib/dhcp/option6_iaaddr.h
@@ -126,8 +126,7 @@ public:
     getValid() const { return valid_; }
 
     /// returns data length (data length + DHCPv4/DHCPv6 option header)
-    virtual unsigned short
-    len();
+    virtual uint16_t len();
 
 protected:
     /// contains an IPv6 address
diff --git a/src/lib/dhcp/pkt4.cc b/src/lib/dhcp/pkt4.cc
index ba07a10..bea93fc 100644
--- a/src/lib/dhcp/pkt4.cc
+++ b/src/lib/dhcp/pkt4.cc
@@ -47,11 +47,9 @@ Pkt4::Pkt4(uint8_t msg_type, uint32_t transid)
       yiaddr_(DEFAULT_ADDRESS),
       siaddr_(DEFAULT_ADDRESS),
       giaddr_(DEFAULT_ADDRESS),
-      bufferIn_(NULL, 0), // not used, this is TX packet
       bufferOut_(DHCPV4_PKT_HDR_LEN),
       msg_type_(msg_type)
 {
-    /// TODO: fixed fields, uncomment in ticket #1224
     memset(chaddr_, 0, MAX_CHADDR_LEN);
     memset(sname_, 0, MAX_SNAME_LEN);
     memset(file_, 0, MAX_FILE_LEN);
@@ -64,7 +62,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
       ifindex_(-1),
       local_port_(DHCP4_SERVER_PORT),
       remote_port_(DHCP4_CLIENT_PORT),
-      /// TODO Fixed fields, uncomment in ticket #1224
       op_(BOOTREQUEST),
       transid_(0),
       secs_(0),
@@ -73,7 +70,6 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
       yiaddr_(DEFAULT_ADDRESS),
       siaddr_(DEFAULT_ADDRESS),
       giaddr_(DEFAULT_ADDRESS),
-      bufferIn_(data, len),
       bufferOut_(0), // not used, this is RX packet
       msg_type_(DHCPDISCOVER)
 {
@@ -82,6 +78,9 @@ Pkt4::Pkt4(const uint8_t* data, size_t len)
                   << " received, at least " << DHCPV4_PKT_HDR_LEN
                   << "is expected");
     }
+
+    data_.resize(len);
+    memcpy(&data_[0], data, len);
 }
 
 size_t
@@ -117,35 +116,43 @@ Pkt4::pack() {
 
     LibDHCP::packOptions(bufferOut_, options_);
 
+    // add the END option that marks the end of the options section
+    // (the End option is a single octet with value 255 and has no length field)
+    bufferOut_.writeUint8(DHO_END);
+
     return (true);
 }
 bool
 Pkt4::unpack() {
-    if (bufferIn_.getLength()<DHCPV4_PKT_HDR_LEN) {
+
+    // input buffer (used during message reception)
+    isc::util::InputBuffer bufferIn(&data_[0], data_.size());
+
+    if (bufferIn.getLength() < DHCPV4_PKT_HDR_LEN) {
         isc_throw(OutOfRange, "Received truncated DHCPv4 packet (len="
-                  << bufferIn_.getLength() << " received, at least "
+                  << bufferIn.getLength() << " received, at least "
                   << DHCPV4_PKT_HDR_LEN << "is expected");
     }
 
-    op_ = bufferIn_.readUint8();
-    htype_ = bufferIn_.readUint8();
-    hlen_ = bufferIn_.readUint8();
-    hops_ = bufferIn_.readUint8();
-    transid_ = bufferIn_.readUint32();
-    secs_ = bufferIn_.readUint16();
-    flags_ = bufferIn_.readUint16();
-    ciaddr_ = IOAddress(bufferIn_.readUint32());
-    yiaddr_ = IOAddress(bufferIn_.readUint32());
-    siaddr_ = IOAddress(bufferIn_.readUint32());
-    giaddr_ = IOAddress(bufferIn_.readUint32());
-    bufferIn_.readData(chaddr_, MAX_CHADDR_LEN);
-    bufferIn_.readData(sname_, MAX_SNAME_LEN);
-    bufferIn_.readData(file_, MAX_FILE_LEN);
-
-    size_t opts_len = bufferIn_.getLength() - bufferIn_.getPosition();
+    op_ = bufferIn.readUint8();
+    htype_ = bufferIn.readUint8();
+    hlen_ = bufferIn.readUint8();
+    hops_ = bufferIn.readUint8();
+    transid_ = bufferIn.readUint32();
+    secs_ = bufferIn.readUint16();
+    flags_ = bufferIn.readUint16();
+    ciaddr_ = IOAddress(bufferIn.readUint32());
+    yiaddr_ = IOAddress(bufferIn.readUint32());
+    siaddr_ = IOAddress(bufferIn.readUint32());
+    giaddr_ = IOAddress(bufferIn.readUint32());
+    bufferIn.readData(chaddr_, MAX_CHADDR_LEN);
+    bufferIn.readData(sname_, MAX_SNAME_LEN);
+    bufferIn.readData(file_, MAX_FILE_LEN);
+
+    size_t opts_len = bufferIn.getLength() - bufferIn.getPosition();
     vector<uint8_t> optsBuffer;
     // first use of readVector
-    bufferIn_.readVector(optsBuffer, opts_len);
+    bufferIn.readVector(optsBuffer, opts_len);
     LibDHCP::unpackOptions4(optsBuffer, options_);
 
     return (true);
diff --git a/src/lib/dhcp/pkt4.h b/src/lib/dhcp/pkt4.h
index 8517091..189d95d 100644
--- a/src/lib/dhcp/pkt4.h
+++ b/src/lib/dhcp/pkt4.h
@@ -299,10 +299,21 @@ public:
     ///
     /// @return returns option of requested type (or NULL)
     ///         if no such option is present
-
     boost::shared_ptr<Option>
     getOption(uint8_t opt_type);
 
+
+    /// @brief set interface over which packet should be sent
+    ///
+    /// @param interface defines outbound interface
+    void setIface(const std::string& interface) { iface_ = interface; }
+
+    /// @brief gets interface over which packet was received or
+    ///        will be transmitted
+    ///
+    /// @return name of the interface
+    std::string getIface() const { return iface_; }
+
 protected:
 
     /// converts DHCP message type to BOOTP op type
@@ -385,14 +396,15 @@ protected:
 
     // end of real DHCPv4 fields
 
-    /// input buffer (used during message reception)
-    /// Note that it must be modifiable as hooks can modify incoming buffer),
-    /// thus OutputBuffer, not InputBuffer
-    isc::util::InputBuffer bufferIn_;
-
     /// output buffer (used during message
     isc::util::OutputBuffer bufferOut_;
 
+    // Data of the input buffer used for RX packets. Note that InputBuffer
+    // does not store the data itself; it only expects the data to remain
+    // valid for its whole lifetime, so we need to keep our own copy of the
+    // data around.
+    std::vector<uint8_t> data_;
+
     /// message type (e.g. 1=DHCPDISCOVER)
     /// TODO: this will eventually be replaced with DHCP Message Type
     /// option (option 53)
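The ownership change above boils down to the following idiom (a hedged sketch; 'raw' and 'len' are hypothetical placeholders for the received datagram):

    std::vector<uint8_t> data(raw, raw + len);                 // Pkt4 keeps its own copy
    isc::util::InputBuffer buffer_in(&data[0], data.size());   // non-owning view, as in unpack()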
diff --git a/src/lib/dhcp/tests/Makefile.am b/src/lib/dhcp/tests/Makefile.am
index 01799da..176992f 100644
--- a/src/lib/dhcp/tests/Makefile.am
+++ b/src/lib/dhcp/tests/Makefile.am
@@ -18,6 +18,7 @@ libdhcp_unittests_SOURCES += ../libdhcp.h ../libdhcp.cc libdhcp_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_iaaddr.h ../option6_iaaddr.cc option6_iaaddr_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_ia.h ../option6_ia.cc option6_ia_unittest.cc
 libdhcp_unittests_SOURCES += ../option6_addrlst.h ../option6_addrlst.cc option6_addrlst_unittest.cc
+libdhcp_unittests_SOURCES += ../option4_addrlst.cc ../option4_addrlst.h option4_addrlst_unittest.cc
 libdhcp_unittests_SOURCES += ../option.h ../option.cc option_unittest.cc
 libdhcp_unittests_SOURCES += ../pkt6.h ../pkt6.cc pkt6_unittest.cc
 libdhcp_unittests_SOURCES += ../pkt4.h ../pkt4.cc pkt4_unittest.cc
diff --git a/src/lib/dhcp/tests/option4_addrlst_unittest.cc b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
new file mode 100644
index 0000000..d4ecf80
--- /dev/null
+++ b/src/lib/dhcp/tests/option4_addrlst_unittest.cc
@@ -0,0 +1,273 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <config.h>
+#include <iostream>
+#include <sstream>
+#include <arpa/inet.h>
+#include <gtest/gtest.h>
+#include <asiolink/io_address.h>
+#include <dhcp/dhcp4.h>
+#include <dhcp/option.h>
+#include <dhcp/option4_addrlst.h>
+#include <util/buffer.h>
+
+using namespace std;
+using namespace isc;
+using namespace isc::dhcp;
+using namespace isc::asiolink;
+using namespace isc::util;
+
+namespace {
+
+// a sample data (list of 4 addresses)
+const uint8_t sampledata[] = {
+    192, 0, 2, 3,     // 192.0.2.3
+    255, 255, 255, 0, // 255.255.255.0 - popular netmask
+    0, 0, 0 , 0,      // used for default routes or (any address)
+    127, 0, 0, 1      // loopback
+};
+
+// expected on-wire format for an option with 1 address
+const uint8_t expected1[] = { // 1 address
+    DHO_DOMAIN_NAME_SERVERS, 4, // type, length
+    192, 0, 2, 3,     // 192.0.2.3
+};
+
+// expected on-wire format for an option with 4 addresses
+const uint8_t expected4[] = { // 4 addresses
+    254, 16,            // type = 254, len = 16
+    192, 0, 2, 3,       // 192.0.2.3
+    255, 255, 255, 0,   // 255.255.255.0 - popular netmask
+    0, 0, 0 ,0,         // used for default routes or (any address)
+    127, 0, 0, 1        // loopback
+};
+
+class Option4AddrLstTest : public ::testing::Test {
+protected:
+
+    Option4AddrLstTest():
+        vec_(vector<uint8_t>(300,0)) // 300 bytes long filled with 0s
+    {
+        sampleAddrs_.push_back(IOAddress("192.0.2.3"));
+        sampleAddrs_.push_back(IOAddress("255.255.255.0"));
+        sampleAddrs_.push_back(IOAddress("0.0.0.0"));
+        sampleAddrs_.push_back(IOAddress("127.0.0.1"));
+    }
+
+    vector<uint8_t> vec_;
+    Option4AddrLst::AddressContainer sampleAddrs_;
+
+};
+
+TEST_F(Option4AddrLstTest, parse1) {
+
+    memcpy(&vec_[0], sampledata, sizeof(sampledata));
+
+    // just one address
+    Option4AddrLst* opt1 = 0;
+    EXPECT_NO_THROW(
+        opt1 = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS,
+                                  vec_.begin(),
+                                  vec_.begin()+4);
+        // use just first address (4 bytes), not the whole
+        // sampledata
+    );
+
+    EXPECT_EQ(Option::V4, opt1->getUniverse());
+
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt1->getType());
+    EXPECT_EQ(6, opt1->len()); // 2 (header) + 4 (1x IPv4 addr)
+
+    Option4AddrLst::AddressContainer addrs = opt1->getAddresses();
+    ASSERT_EQ(1, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    EXPECT_NO_THROW(
+        delete opt1;
+        opt1 = 0;
+    );
+
+    // 1 address
+}
+
+TEST_F(Option4AddrLstTest, parse4) {
+
+    vector<uint8_t> buffer(300,0); // 300 bytes long filled with 0s
+
+    memcpy(&buffer[0], sampledata, sizeof(sampledata));
+
+    // 4 addresses
+    Option4AddrLst* opt4 = 0;
+    EXPECT_NO_THROW(
+        opt4 = new Option4AddrLst(254,
+                                  buffer.begin(),
+                                  buffer.begin()+sizeof(sampledata));
+    );
+
+    EXPECT_EQ(Option::V4, opt4->getUniverse());
+
+    EXPECT_EQ(254, opt4->getType());
+    EXPECT_EQ(18, opt4->len()); // 2 (header) + 16 (4x IPv4 addrs)
+
+    Option4AddrLst::AddressContainer addrs = opt4->getAddresses();
+    ASSERT_EQ(4, addrs.size());
+
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    EXPECT_NO_THROW(
+        delete opt4;
+        opt4 = 0;
+    );
+}
+
+TEST_F(Option4AddrLstTest, assembly1) {
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("192.0.2.3"));
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(DHO_DOMAIN_NAME_SERVERS, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(6, opt->len());
+    ASSERT_EQ(6, buf.getLength());
+
+    EXPECT_EQ(0, memcmp(expected1, buf.getData(), 6));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is old-fashioned option. We don't serve IPv6 types here!
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, IOAddress("2001:db8::1")),
+        BadValue
+    );
+    if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, assembly4) {
+
+
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(254, sampleAddrs_);
+    );
+    EXPECT_EQ(Option::V4, opt->getUniverse());
+    EXPECT_EQ(254, opt->getType());
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    OutputBuffer buf(100);
+    EXPECT_NO_THROW(
+        opt->pack4(buf);
+    );
+
+    ASSERT_EQ(18, opt->len()); // 2(header) + 4xsizeof(IPv4addr)
+    ASSERT_EQ(18, buf.getLength());
+
+    ASSERT_EQ(0, memcmp(expected4, buf.getData(), 18));
+
+    EXPECT_NO_THROW(
+        delete opt;
+        opt = 0;
+    );
+
+    // This is old-fashioned option. We don't serve IPv6 types here!
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt = new Option4AddrLst(DHO_DOMAIN_NAME_SERVERS, sampleAddrs_),
+        BadValue
+    );
+    if (opt) {
+        // test failed. Exception was not thrown, but option was created instead.
+        delete opt;
+    }
+}
+
+TEST_F(Option4AddrLstTest, setAddress) {
+    Option4AddrLst* opt = 0;
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123, IOAddress("1.2.3.4"));
+    );
+    opt->setAddress(IOAddress("192.0.255.255"));
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(1, addrs.size() );
+    EXPECT_EQ("192.0.255.255", addrs[0].toText());
+
+    // Only IPv4 addresses should be accepted.
+    EXPECT_THROW(
+        opt->setAddress(IOAddress("2001:db8::1")),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+TEST_F(Option4AddrLstTest, setAddresses) {
+
+    Option4AddrLst* opt = 0;
+
+    EXPECT_NO_THROW(
+        opt = new Option4AddrLst(123); // empty list
+    );
+
+    opt->setAddresses(sampleAddrs_);
+
+    Option4AddrLst::AddressContainer addrs = opt->getAddresses();
+    ASSERT_EQ(4, addrs.size() );
+    EXPECT_EQ("192.0.2.3", addrs[0].toText());
+    EXPECT_EQ("255.255.255.0", addrs[1].toText());
+    EXPECT_EQ("0.0.0.0", addrs[2].toText());
+    EXPECT_EQ("127.0.0.1", addrs[3].toText());
+
+    // Only IPv4 addresses should be accepted.
+    sampleAddrs_.push_back(IOAddress("2001:db8::1"));
+    EXPECT_THROW(
+        opt->setAddresses(sampleAddrs_),
+        BadValue
+    );
+
+    EXPECT_NO_THROW(
+        delete opt;
+    );
+}
+
+} // namespace
diff --git a/src/lib/dhcp/tests/option_unittest.cc b/src/lib/dhcp/tests/option_unittest.cc
index db3ee3b..66dce8f 100644
--- a/src/lib/dhcp/tests/option_unittest.cc
+++ b/src/lib/dhcp/tests/option_unittest.cc
@@ -402,6 +402,8 @@ TEST_F(OptionTest, v6_addgetdel) {
 
     // let's try to delete - should fail
     EXPECT_TRUE(false ==  parent->delOption(2));
+
+    delete parent;
 }
 
 }
diff --git a/src/lib/dhcp/tests/pkt4_unittest.cc b/src/lib/dhcp/tests/pkt4_unittest.cc
index c89743f..091bfac 100644
--- a/src/lib/dhcp/tests/pkt4_unittest.cc
+++ b/src/lib/dhcp/tests/pkt4_unittest.cc
@@ -487,13 +487,15 @@ TEST(Pkt4Test, options) {
 
     const OutputBuffer& buf = pkt->getBuffer();
     // check that all options are stored, they should take sizeof(v4Opts)
-    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts),
+    // there should also be a DHO_END octet appended (just one byte)
+    ASSERT_EQ(static_cast<size_t>(Pkt4::DHCPV4_PKT_HDR_LEN) + sizeof(v4Opts) + 1,
               buf.getLength());
 
     // check that this extra data actually contains our options
     const uint8_t* ptr = static_cast<const uint8_t*>(buf.getData());
     ptr += Pkt4::DHCPV4_PKT_HDR_LEN; // rewind to end of fixed part
     EXPECT_EQ(0, memcmp(ptr, v4Opts, sizeof(v4Opts)));
+    EXPECT_EQ(DHO_END, static_cast<uint8_t>(*(ptr + sizeof(v4Opts))));
 
     EXPECT_NO_THROW(
         delete pkt;
@@ -559,4 +561,17 @@ TEST(Pkt4Test, unpackOptions) {
     EXPECT_EQ(0, memcmp(&x->getData()[0], v4Opts+22, 3)); // data len=3
 }
 
+// This test verifies methods that are used for manipulating meta fields
+// i.e. fields that are not part of DHCPv4 (e.g. interface name).
+TEST(Pkt4Test, metaFields) {
+    Pkt4 pkt(DHCPDISCOVER, 1234);
+
+    pkt.setIface("lo0");
+
+    EXPECT_EQ("lo0", pkt.getIface());
+
+    /// TODO: Expand this test once additional getters/setters are
+    /// implemented.
+}
+
 } // end of anonymous namespace
diff --git a/src/lib/dns/Makefile.am b/src/lib/dns/Makefile.am
index 0d2bffd..5b93f75 100644
--- a/src/lib/dns/Makefile.am
+++ b/src/lib/dns/Makefile.am
@@ -84,6 +84,8 @@ BUILT_SOURCES += rdataclass.h rdataclass.cc
 
 lib_LTLIBRARIES = libdns++.la
 
+libdns___la_LDFLAGS = -no-undefined -version-info 1:0:1
+
 libdns___la_SOURCES =
 libdns___la_SOURCES += edns.h edns.cc
 libdns___la_SOURCES += exceptions.h exceptions.cc
@@ -102,6 +104,7 @@ libdns___la_SOURCES += rrsetlist.h rrsetlist.cc
 libdns___la_SOURCES += rrttl.h rrttl.cc
 libdns___la_SOURCES += rrtype.cc
 libdns___la_SOURCES += question.h question.cc
+libdns___la_SOURCES += serial.h serial.cc
 libdns___la_SOURCES += tsig.h tsig.cc
 libdns___la_SOURCES += tsigerror.h tsigerror.cc
 libdns___la_SOURCES += tsigkey.h tsigkey.cc
diff --git a/src/lib/dns/python/Makefile.am b/src/lib/dns/python/Makefile.am
index 3b89358..dd14991 100644
--- a/src/lib/dns/python/Makefile.am
+++ b/src/lib/dns/python/Makefile.am
@@ -12,6 +12,7 @@ libpydnspp_la_SOURCES += rrclass_python.cc rrclass_python.h
 libpydnspp_la_SOURCES += rrtype_python.cc rrtype_python.h
 libpydnspp_la_SOURCES += rrttl_python.cc rrttl_python.h
 libpydnspp_la_SOURCES += rdata_python.cc rdata_python.h
+libpydnspp_la_SOURCES += serial_python.cc serial_python.h
 libpydnspp_la_SOURCES += messagerenderer_python.cc messagerenderer_python.h
 libpydnspp_la_SOURCES += rcode_python.cc rcode_python.h
 libpydnspp_la_SOURCES += opcode_python.cc opcode_python.h
diff --git a/src/lib/dns/python/pydnspp.cc b/src/lib/dns/python/pydnspp.cc
index 0a7d8e5..212141c 100644
--- a/src/lib/dns/python/pydnspp.cc
+++ b/src/lib/dns/python/pydnspp.cc
@@ -49,6 +49,7 @@
 #include "rrset_python.h"
 #include "rrttl_python.h"
 #include "rrtype_python.h"
+#include "serial_python.h"
 #include "tsigerror_python.h"
 #include "tsigkey_python.h"
 #include "tsig_python.h"
@@ -492,6 +493,18 @@ initModulePart_RRType(PyObject* mod) {
 }
 
 bool
+initModulePart_Serial(PyObject* mod) {
+    if (PyType_Ready(&serial_type) < 0) {
+        return (false);
+    }
+    Py_INCREF(&serial_type);
+    PyModule_AddObject(mod, "Serial",
+                       reinterpret_cast<PyObject*>(&serial_type));
+
+    return (true);
+}
+
+bool
 initModulePart_TSIGError(PyObject* mod) {
     if (PyType_Ready(&tsigerror_type) < 0) {
         return (false);
@@ -804,6 +817,10 @@ PyInit_pydnspp(void) {
         return (NULL);
     }
 
+    if (!initModulePart_Serial(mod)) {
+        return (NULL);
+    }
+
     if (!initModulePart_TSIGKey(mod)) {
         return (NULL);
     }
diff --git a/src/lib/dns/python/serial_python.cc b/src/lib/dns/python/serial_python.cc
new file mode 100644
index 0000000..e2bd809
--- /dev/null
+++ b/src/lib/dns/python/serial_python.cc
@@ -0,0 +1,281 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <Python.h>
+
+#include <dns/serial.h>
+#include <util/python/pycppwrapper_util.h>
+
+#include "serial_python.h"
+#include "pydnspp_common.h"
+
+using namespace std;
+using namespace isc::dns;
+using namespace isc::dns::python;
+using namespace isc::util;
+using namespace isc::util::python;
+
+namespace {
+// The s_* Class simply covers one instantiation of the object
+class s_Serial : public PyObject {
+public:
+    s_Serial() : cppobj(NULL) {};
+    isc::dns::Serial* cppobj;
+};
+
+typedef CPPPyObjectContainer<s_Serial, Serial> SerialContainer;
+
+PyObject* Serial_str(PyObject* self);
+PyObject* Serial_getValue(s_Serial* self);
+PyObject* Serial_richcmp(s_Serial* self, s_Serial* other, int op);
+PyObject* Serial_add(PyObject* left, PyObject* right);
+
+// This list contains the actual set of functions we have in
+// python. Each entry has
+// 1. Python method name
+// 2. Our static function here
+// 3. Argument type
+// 4. Documentation
+PyMethodDef Serial_methods[] = {
+    { "get_value", reinterpret_cast<PyCFunction>(Serial_getValue), METH_NOARGS,
+      "Returns the Serial value as an integer" },
+    { NULL, NULL, 0, NULL }
+};
+
+// For overriding the + operator. We do not define any other operators for
+// this type.
+PyNumberMethods Serial_NumberMethods = {
+    Serial_add, //nb_add;
+    NULL, //nb_subtract;
+    NULL, //nb_multiply;
+    NULL, //nb_remainder;
+    NULL, //nb_divmod;
+    NULL, //nb_power;
+    NULL, //nb_negative;
+    NULL, //nb_positive;
+    NULL, //nb_absolute;
+    NULL, //nb_bool;
+    NULL, //nb_invert;
+    NULL, //nb_lshift;
+    NULL, //nb_rshift;
+    NULL, //nb_and;
+    NULL, //nb_xor;
+    NULL, //nb_or;
+    NULL, //nb_int;
+    NULL, //nb_reserved;
+    NULL, //nb_float;
+
+    NULL, //nb_inplace_add;
+    NULL, //nb_inplace_subtract;
+    NULL, //nb_inplace_multiply;
+    NULL, //nb_inplace_remainder;
+    NULL, //nb_inplace_power;
+    NULL, //nb_inplace_lshift;
+    NULL, //nb_inplace_rshift;
+    NULL, //nb_inplace_and;
+    NULL, //nb_inplace_xor;
+    NULL, //nb_inplace_or;
+
+    NULL, //nb_floor_divide;
+    NULL, //nb_true_divide;
+    NULL, //nb_inplace_floor_divide;
+    NULL, //nb_inplace_true_divide;
+
+    NULL, //nb_index;
+};
+
+int
+Serial_init(s_Serial* self, PyObject* args) {
+    long long i;
+    if (PyArg_ParseTuple(args, "L", &i)) {
+        PyErr_Clear();
+        if (i < 0 || i > 0xffffffff) {
+            PyErr_SetString(PyExc_ValueError, "Serial number out of range");
+            return (-1);
+        }
+        self->cppobj = new Serial(i);
+        return (0);
+    } else {
+        return (-1);
+    }
+}
+
+void
+Serial_destroy(s_Serial* self) {
+    delete self->cppobj;
+    self->cppobj = NULL;
+    Py_TYPE(self)->tp_free(self);
+}
+
+PyObject*
+Serial_getValue(s_Serial* self) {
+    return (Py_BuildValue("I", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_str(PyObject* po_self) {
+    const s_Serial* const self = static_cast<s_Serial*>(po_self);
+    return (PyUnicode_FromFormat("%u", self->cppobj->getValue()));
+}
+
+PyObject*
+Serial_richcmp(s_Serial* self, s_Serial* other, int op) {
+    bool c = false;
+
+    // Check for null and if the types match. If different type,
+    // simply return False
+    if (!other || (self->ob_type != other->ob_type)) {
+        Py_RETURN_FALSE;
+    }
+
+    switch (op) {
+    case Py_LT:
+        c = *self->cppobj < *other->cppobj;
+        break;
+    case Py_LE:
+        c = *self->cppobj <= *other->cppobj;
+        break;
+    case Py_EQ:
+        c = *self->cppobj == *other->cppobj;
+        break;
+    case Py_NE:
+        c = *self->cppobj != *other->cppobj;
+        break;
+    case Py_GT:
+        c = *self->cppobj > *other->cppobj;
+        break;
+    case Py_GE:
+        c = *self->cppobj >= *other->cppobj;
+        break;
+    }
+    if (c) {
+        Py_RETURN_TRUE;
+    } else {
+        Py_RETURN_FALSE;
+    }
+}
+
+PyObject *
+Serial_add(PyObject *left, PyObject *right) {
+    // Either operand can be a Serial or a long, as long as at least one of
+    // them is a Serial
+    if (PySerial_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PySerial_ToSerial(right)));
+    } else if (PySerial_Check(left) && PyLong_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(left) +
+                                   PyLong_AsLong(right)));
+    } else if (PyLong_Check(left) && PySerial_Check(right)) {
+        return (createSerialObject(PySerial_ToSerial(right) +
+                                   PyLong_AsLong(left)));
+    } else {
+        Py_INCREF(Py_NotImplemented);
+        return Py_NotImplemented;
+    }
+}
+
+} // end anonymous namespace
+
+namespace isc {
+namespace dns {
+namespace python {
+// This defines the complete type for reflection in python and
+// parsing of PyObject* to s_Serial
+// Most of the functions are not actually implemented and NULL here.
+PyTypeObject serial_type = {
+    PyVarObject_HEAD_INIT(NULL, 0)
+    "pydnspp.Serial",
+    sizeof(s_Serial),                   // tp_basicsize
+    0,                                  // tp_itemsize
+    (destructor)Serial_destroy,         // tp_dealloc
+    NULL,                               // tp_print
+    NULL,                               // tp_getattr
+    NULL,                               // tp_setattr
+    NULL,                               // tp_reserved
+    NULL,                               // tp_repr
+    &Serial_NumberMethods,              // tp_as_number
+    NULL,                               // tp_as_sequence
+    NULL,                               // tp_as_mapping
+    NULL,                               // tp_hash
+    NULL,                               // tp_call
+    Serial_str,                         // tp_str
+    NULL,                               // tp_getattro
+    NULL,                               // tp_setattro
+    NULL,                               // tp_as_buffer
+    Py_TPFLAGS_DEFAULT,                 // tp_flags
+    "The Serial class encapsulates Serials used in DNS SOA records.\n\n"
+    "This is a straightforward class; an Serial object simply maintains a "
+    "32-bit unsigned integer corresponding to the SOA SERIAL value.  The "
+    "main purpose of this class is to provide serial number arithmetic, as "
+    "described in RFC 1892. Objects of this type can be compared and added "
+    "to each other, as described in RFC 1892. Apart from str(), get_value(), "
+    "comparison operators, and the + operator, no other operations are "
+    "defined for this type.",
+    NULL,                               // tp_traverse
+    NULL,                               // tp_clear
+    (richcmpfunc)Serial_richcmp,        // tp_richcompare
+    0,                                  // tp_weaklistoffset
+    NULL,                               // tp_iter
+    NULL,                               // tp_iternext
+    Serial_methods,                     // tp_methods
+    NULL,                               // tp_members
+    NULL,                               // tp_getset
+    NULL,                               // tp_base
+    NULL,                               // tp_dict
+    NULL,                               // tp_descr_get
+    NULL,                               // tp_descr_set
+    0,                                  // tp_dictoffset
+    (initproc)Serial_init,              // tp_init
+    NULL,                               // tp_alloc
+    PyType_GenericNew,                  // tp_new
+    NULL,                               // tp_free
+    NULL,                               // tp_is_gc
+    NULL,                               // tp_bases
+    NULL,                               // tp_mro
+    NULL,                               // tp_cache
+    NULL,                               // tp_subclasses
+    NULL,                               // tp_weaklist
+    NULL,                               // tp_del
+    0                                   // tp_version_tag
+};
+
+PyObject*
+createSerialObject(const Serial& source) {
+    SerialContainer container(PyObject_New(s_Serial, &serial_type));
+    container.set(new Serial(source));
+    return (container.release());
+}
+
+bool
+PySerial_Check(PyObject* obj) {
+    if (obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial typecheck");
+    }
+    return (PyObject_TypeCheck(obj, &serial_type));
+}
+
+const Serial&
+PySerial_ToSerial(const PyObject* serial_obj) {
+    if (serial_obj == NULL) {
+        isc_throw(PyCPPWrapperException,
+                  "obj argument NULL in Serial PyObject conversion");
+    }
+    const s_Serial* serial = static_cast<const s_Serial*>(serial_obj);
+    return (*serial->cppobj);
+}
+
+} // namespace python
+} // namespace dns
+} // namespace isc
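
As a rough illustration of how the binding above behaves from the Python side
(this is only a sketch, not part of the patch; it assumes a pydnspp build that
includes the new Serial type, and the method name get_value comes from the
accompanying unit test):

    from pydnspp import Serial

    s = Serial(4294967295)      # the highest possible serial
    print(s)                    # "4294967295", via Serial_str
    print(s.get_value())        # 4294967295, via Serial_getValue

    # Serial_add accepts Serial + Serial, Serial + int and int + Serial,
    # wrapping modulo 2^32:
    print(s + 1)                # "0"
    print(2 + s)                # "1"

    # Serial_richcmp implements RFC 1982 comparison, so the wrapped result
    # is still considered larger than the original value:
    print(s + 1 > s)            # True

    # Anything else yields NotImplemented, which Python turns into TypeError:
    s + "text"                  # raises TypeError
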
diff --git a/src/lib/dns/python/serial_python.h b/src/lib/dns/python/serial_python.h
new file mode 100644
index 0000000..48b5199
--- /dev/null
+++ b/src/lib/dns/python/serial_python.h
@@ -0,0 +1,64 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __PYTHON_SERIAL_H
+#define __PYTHON_SERIAL_H 1
+
+#include <Python.h>
+
+namespace isc {
+namespace dns {
+class Serial;
+
+namespace python {
+
+extern PyTypeObject serial_type;
+
+/// This is a simple shortcut to create a python Serial object (in the
+/// form of a pointer to PyObject) with minimal exception safety.
+/// On success, it returns a valid pointer to PyObject with a reference
+/// counter of 1; if something goes wrong it throws an exception (it never
+/// returns a NULL pointer).
+/// This function is expected to be called within a try block
+/// followed by necessary setup for python exception.
+PyObject* createSerialObject(const Serial& source);
+
+/// \brief Checks if the given python object is a Serial object
+///
+/// \exception PyCPPWrapperException if obj is NULL
+///
+/// \param obj The object to check the type of
+/// \return true if the object is of type Serial, false otherwise
+bool PySerial_Check(PyObject* obj);
+
+/// \brief Returns a reference to the Serial object contained within the given
+///        Python object.
+///
+/// \note The given object MUST be of type Serial; this can be checked with
+///       either the right call to ParseTuple("O!"), or with PySerial_Check()
+///
+/// \note This is not a copy; if the Serial is needed when the PyObject
+/// may be destroyed, the caller must copy it itself.
+///
+/// \param Serial_obj The Serial object to convert
+const Serial& PySerial_ToSerial(const PyObject* Serial_obj);
+
+} // namespace python
+} // namespace dns
+} // namespace isc
+#endif // __PYTHON_SERIAL_H
+
+// Local Variables:
+// mode: c++
+// End:
diff --git a/src/lib/dns/python/tests/Makefile.am b/src/lib/dns/python/tests/Makefile.am
index d1273f3..3338727 100644
--- a/src/lib/dns/python/tests/Makefile.am
+++ b/src/lib/dns/python/tests/Makefile.am
@@ -11,6 +11,7 @@ PYTESTS += rrclass_python_test.py
 PYTESTS += rrset_python_test.py
 PYTESTS += rrttl_python_test.py
 PYTESTS += rrtype_python_test.py
+PYTESTS += serial_python_test.py
 PYTESTS += tsig_python_test.py
 PYTESTS += tsig_rdata_python_test.py
 PYTESTS += tsigerror_python_test.py
diff --git a/src/lib/dns/python/tests/serial_python_test.py b/src/lib/dns/python/tests/serial_python_test.py
new file mode 100644
index 0000000..0ca08c2
--- /dev/null
+++ b/src/lib/dns/python/tests/serial_python_test.py
@@ -0,0 +1,111 @@
+# Copyright (C) 2011  Internet Systems Consortium.
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+#
+# Tests for the serial part of the pydnspp module
+#
+
+import unittest
+import os
+from pydnspp import *
+
+class SerialTest(unittest.TestCase):
+    def setUp(self):
+        self.one = Serial(1)
+        self.one_2 = Serial(1)
+        self.two = Serial(2)
+        self.date_zero = Serial(1980120100)
+        self.date_one = Serial(1980120101)
+        self.zero = Serial(0)
+        self.highest = Serial(4294967295)
+        self.number_low = Serial(12345)
+        self.number_medium = Serial(2000000000)
+        self.number_high = Serial(4000000000)
+
+    def test_init(self):
+        self.assertRaises(ValueError, Serial, -1)
+        self.assertRaises(ValueError, Serial, 4294967296)
+        self.assertRaises(ValueError, Serial, 4294967297)
+        self.assertRaises(ValueError, Serial, 100000000000)
+
+    def test_get_value(self):
+        self.assertEqual(1, self.one.get_value())
+        self.assertNotEqual(2, self.one_2.get_value())
+        self.assertEqual(2, self.two.get_value())
+        self.assertEqual(1980120100, self.date_zero.get_value())
+        self.assertEqual(1980120101, self.date_one.get_value())
+        self.assertEqual(0, self.zero.get_value())
+        self.assertEqual(4294967295, self.highest.get_value())
+        self.assertEqual(12345, self.number_low.get_value())
+        self.assertEqual(2000000000, self.number_medium.get_value())
+        self.assertEqual(4000000000, self.number_high.get_value())
+
+    def test_str(self):
+        self.assertEqual('1', str(self.one))
+        self.assertNotEqual('2', str(self.one_2))
+        self.assertEqual('2', str(self.two))
+        self.assertEqual('1980120100', str(self.date_zero))
+        self.assertEqual('1980120101', str(self.date_one))
+        self.assertEqual('0', str(self.zero))
+        self.assertEqual('4294967295', str(self.highest))
+        self.assertEqual('12345', str(self.number_low))
+        self.assertEqual('2000000000', str(self.number_medium))
+        self.assertEqual('4000000000', str(self.number_high))
+
+    def test_equals(self):
+        self.assertEqual(self.one, self.one)
+        self.assertEqual(self.one, self.one_2)
+        self.assertNotEqual(self.one, self.two)
+        self.assertNotEqual(self.two, self.one)
+        self.assertEqual(Serial(12345), self.number_low)
+        self.assertNotEqual(Serial(12346), self.number_low)
+
+    def test_compare(self):
+        # These should be true/false even without serial arithmetic
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.one_2)
+        self.assertLess(self.one, self.two)
+        self.assertLessEqual(self.one, self.one)
+        self.assertLessEqual(self.one, self.two)
+        self.assertGreater(self.two, self.one)
+        self.assertGreaterEqual(self.two, self.two)
+        self.assertGreaterEqual(self.two, self.one)
+        self.assertLess(self.one, self.number_low)
+        self.assertLess(self.number_low, self.number_medium)
+        self.assertLess(self.number_medium, self.number_high)
+
+        # These should 'wrap'
+        self.assertGreater(self.zero, self.highest)
+        self.assertLess(self.highest, self.one)
+        self.assertLess(self.number_high, self.number_low)
+
+    def test_addition(self):
+        self.assertEqual(self.two, self.one + self.one)
+        self.assertEqual(self.two, self.one + self.one_2)
+        self.assertEqual(self.highest, self.highest + self.zero)
+        self.assertEqual(self.zero, self.highest + self.one)
+        self.assertEqual(self.one, self.highest + self.two)
+        self.assertEqual(self.one, self.highest + self.one + self.one)
+        self.assertEqual(self.one + 100, self.highest + 102)
+        self.assertEqual(100 + self.one, self.highest + 102)
+        self.assertEqual(self.zero + 2147483645, self.highest + 2147483646)
+
+        # using lambda so the error doesn't get thrown on initial evaluation
+        self.assertRaises(TypeError, lambda: self.zero + "bad")
+        self.assertRaises(TypeError, lambda: self.zero + None)
+        self.assertRaises(TypeError, lambda: "bad" + self.zero)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 875a957..e473bca 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,10 +106,10 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
     renderer.writeData(numdata_, sizeof(numdata_));
 }
 
-uint32_t
+Serial
 SOA::getSerial() const {
     InputBuffer b(numdata_, sizeof(numdata_));
-    return (b.readUint32());
+    return (Serial(b.readUint32()));
 }
 
 string
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 4c6b6ec..2c180b2 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -18,6 +18,7 @@
 
 #include <dns/name.h>
 #include <dns/rdata.h>
+#include <dns/serial.h>
 
 // BEGIN_ISC_NAMESPACE
 
@@ -35,7 +36,7 @@ public:
         uint32_t refresh, uint32_t retry, uint32_t expire,
         uint32_t minimum);
     /// \brief Returns the serial stored in the SOA.
-    uint32_t getSerial() const;
+    Serial getSerial() const;
 private:
     /// Note: this is a prototype version; we may reconsider
     /// this representation later.
diff --git a/src/lib/dns/serial.cc b/src/lib/dns/serial.cc
new file mode 100644
index 0000000..90bc242
--- /dev/null
+++ b/src/lib/dns/serial.cc
@@ -0,0 +1,76 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <dns/serial.h>
+
+namespace isc {
+namespace dns {
+
+bool
+Serial::operator==(const Serial& other) const {
+    return (value_ == other.getValue());
+}
+
+bool
+Serial::operator!=(const Serial& other) const {
+    return (value_ != other.getValue());
+}
+
+bool
+Serial::operator<(const Serial& other) const {
+    uint32_t other_val = other.getValue();
+    bool result = false;
+    if (value_ < other_val) {
+        result = ((other_val - value_) <= MAX_SERIAL_INCREMENT);
+    } else if (other_val < value_) {
+        result = ((value_ - other_val) > MAX_SERIAL_INCREMENT);
+    }
+    return (result);
+}
+
+bool
+Serial::operator<=(const Serial& other) const {
+    return (operator==(other) || operator<(other));
+}
+
+bool
+Serial::operator>(const Serial& other) const {
+    return (!operator==(other) && !operator<(other));
+}
+
+bool
+Serial::operator>=(const Serial& other) const {
+    return (!operator<(other));
+}
+
+Serial
+Serial::operator+(uint32_t other_val) const {
+    uint64_t new_val = static_cast<uint64_t>(value_) +
+                       static_cast<uint64_t>(other_val);
+    return (Serial(static_cast<uint32_t>(new_val % MAX_SERIAL_VALUE)));
+}
+
+Serial
+Serial::operator+(const Serial& other) const {
+    return (operator+(other.getValue()));
+}
+
+std::ostream&
+operator<<(std::ostream& os, const Serial& serial) {
+    return (os << serial.getValue());
+}
+
+} // end namespace dns
+} // end namespace isc
+
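
To make the comparison rule in operator<() above concrete, here is a worked
example using only values that also appear in the unit tests added below
(illustrative only, not part of the patch):

    4294967295 - 0 = 4294967295 > 2147483647 (MAX_SERIAL_INCREMENT)
        => Serial(4294967295) < Serial(0), i.e. the wrapped value 0 is "newer"
    2000000000 - 12345 = 1999987655 <= 2147483647
        => Serial(12345) < Serial(2000000000), the plain ordering holds
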
diff --git a/src/lib/dns/serial.h b/src/lib/dns/serial.h
new file mode 100644
index 0000000..3549860
--- /dev/null
+++ b/src/lib/dns/serial.h
@@ -0,0 +1,155 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#ifndef __SERIAL_H
+#define __SERIAL_H 1
+
+#include <stdint.h>
+#include <iostream>
+
+namespace isc {
+namespace dns {
+
+/// The maximum difference between two serial numbers. If the (plain uint32_t)
+/// difference between two serials is greater than this number, the smaller one
+/// is considered greater.
+const uint32_t MAX_SERIAL_INCREMENT = 2147483647;
+
+/// The value serials wrap around at in the + operator (2^32).
+const uint64_t MAX_SERIAL_VALUE = 4294967296ull;
+
+/// \brief This class defines DNS serial numbers and serial arithmetic.
+///
+/// DNS serial numbers are in essence unsigned 32-bit numbers, with one
+/// catch: they should be compared using sequence space arithmetic.
+/// Since they are 32 bits wide, as soon as the difference between two
+/// serial numbers is greater than 2147483647 (2^31 - 1), the lower number
+/// (in plain comparison) is considered the higher one.
+///
+/// In order to do this as transparently as possible, these numbers are
+/// stored in the Serial class, which overrides the basic comparison operators.
+///
+/// In this specific context, these operations are called 'serial number
+/// arithmetic', and they are defined in RFC 1982.
+///
+/// \note RFC 1982 defines everything based on the value SERIAL_BITS. Since
+/// the serial number has a fixed length of 32 bits, the values we use are
+/// hard-coded, and not computed based on variable bit lengths.
+class Serial {
+public:
+    /// \brief Constructor with value
+    ///
+    /// \param value The uint32_t value of the serial
+    explicit Serial(uint32_t value) : value_(value) {}
+
+    /// \brief Copy constructor
+    Serial(const Serial& other) : value_(other.getValue()) {}
+
+    /// \brief Direct assignment from other Serial
+    ///
+    /// \param other The Serial to assign the value from
+    void operator=(const Serial& other) { value_ = other.getValue(); }
+
+    /// \brief Direct assignment from value
+    ///
+    /// \param value the uint32_t value to assign
+    void operator=(uint32_t value) { value_ = value; }
+
+    /// \brief Returns the uint32_t representation of this serial value
+    ///
+    /// \return The uint32_t value of this Serial
+    uint32_t getValue() const { return (value_); }
+
+    /// \brief Returns true if the serial values are equal
+    ///
+    /// \return True if the values are equal
+    bool operator==(const Serial& other) const;
+
+    /// \brief Returns true if the serial values are not equal
+    ///
+    /// \return True if the values are not equal
+    bool operator!=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is smaller than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than the given value
+    bool operator<(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// smaller than the other, according to serial arithmetic as described
+    /// in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is smaller than or equal to the given value
+    bool operator<=(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is greater than
+    /// the other, according to serial arithmetic as described in RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than the given value
+    bool operator>(const Serial& other) const;
+
+    /// \brief Returns true if the serial value of this serial is equal to or
+    /// greater than the other, according to serial arithmetic as described in
+    /// RFC 1982
+    ///
+    /// \param other The Serial to compare to
+    ///
+    /// \return True if this is greater than or equal to the given value
+    bool operator>=(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other The Serial to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(const Serial& other) const;
+
+    /// \brief Adds the given value to the serial number. If this would make
+    /// the number greater than 2^32-1, it is 'wrapped'.
+    ///
+    /// \note According to the specification, an addition greater than
+    /// MAX_SERIAL_INCREMENT is undefined. We do NOT catch this error (so as not
+    /// to raise exceptions), but this behaviour remains undefined.
+    ///
+    /// \param other_val The uint32_t value to add
+    ///
+    /// \return The result of the addition
+    Serial operator+(uint32_t other_val) const;
+
+private:
+    uint32_t value_;
+};
+
+/// \brief Helper operator for output streams, writes the value to the stream
+///
+/// \param os The ostream to write to
+/// \param serial The Serial to write
+/// \return the output stream
+std::ostream& operator<<(std::ostream& os, const Serial& serial);
+
+} // end namespace dns
+} // end namespace isc
+
+#endif // __SERIAL_H
diff --git a/src/lib/dns/tests/Makefile.am b/src/lib/dns/tests/Makefile.am
index ceeb3b8..cfd1286 100644
--- a/src/lib/dns/tests/Makefile.am
+++ b/src/lib/dns/tests/Makefile.am
@@ -54,6 +54,7 @@ run_unittests_SOURCES += question_unittest.cc
 run_unittests_SOURCES += rrparamregistry_unittest.cc
 run_unittests_SOURCES += masterload_unittest.cc
 run_unittests_SOURCES += message_unittest.cc
+run_unittests_SOURCES += serial_unittest.cc
 run_unittests_SOURCES += tsig_unittest.cc
 run_unittests_SOURCES += tsigerror_unittest.cc
 run_unittests_SOURCES += tsigkey_unittest.cc
@@ -61,12 +62,12 @@ run_unittests_SOURCES += tsigrecord_unittest.cc
 run_unittests_SOURCES += character_string_unittest.cc
 run_unittests_SOURCES += run_unittests.cc
 run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-# We shouldn't need to include BOTAN_LDFLAGS here, but there
+# We shouldn't need to include BOTAN_LIBS here, but there
 # is one test system where the path for GTEST_LDFLAGS contains
 # an older version of botan, and somehow that version gets
 # linked if we don't
-run_unittests_LDFLAGS = $(AM_LDFLAGS) $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS)
-run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDFLAGS = $(BOTAN_LDFLAGS) $(GTEST_LDFLAGS) $(AM_LDFLAGS)
+run_unittests_LDADD = $(BOTAN_LIBS) $(GTEST_LDADD)
 run_unittests_LDADD += $(top_builddir)/src/lib/dns/libdns++.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/libutil.la
 run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 17498eb..07c24d5 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -76,7 +76,7 @@ TEST_F(Rdata_SOA_Test, toText) {
 }
 
 TEST_F(Rdata_SOA_Test, getSerial) {
-    EXPECT_EQ(2010012601, rdata_soa.getSerial());
+    EXPECT_EQ(2010012601, rdata_soa.getSerial().getValue());
 }
 
 }
diff --git a/src/lib/dns/tests/serial_unittest.cc b/src/lib/dns/tests/serial_unittest.cc
new file mode 100644
index 0000000..e27f628
--- /dev/null
+++ b/src/lib/dns/tests/serial_unittest.cc
@@ -0,0 +1,179 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <dns/serial.h>
+
+using namespace isc::dns;
+
+class SerialTest : public ::testing::Test {
+public:
+    SerialTest() : one(1), one_2(1), two(2),
+                   date_zero(1980120100), date_one(1980120101),
+                   min(0), max(4294967295u),
+                   number_low(12345),
+                   number_medium(2000000000),
+                   number_high(4000000000u)
+    {}
+    Serial one, one_2, two, date_zero, date_one, min, max, number_low, number_medium, number_high;
+};
+
+//
+// Basic tests
+//
+
+TEST_F(SerialTest, get_value) {
+    EXPECT_EQ(1, one.getValue());
+    EXPECT_NE(2, one.getValue());
+    EXPECT_EQ(2, two.getValue());
+    EXPECT_EQ(1980120100, date_zero.getValue());
+    EXPECT_EQ(1980120101, date_one.getValue());
+    EXPECT_EQ(0, min.getValue());
+    EXPECT_EQ(4294967295u, max.getValue());
+    EXPECT_EQ(12345, number_low.getValue());
+    EXPECT_EQ(2000000000, number_medium.getValue());
+    EXPECT_EQ(4000000000u, number_high.getValue());
+}
+
+TEST_F(SerialTest, equals) {
+    EXPECT_EQ(one, one);
+    EXPECT_EQ(one, one_2);
+    EXPECT_NE(one, two);
+    EXPECT_NE(two, one);
+    EXPECT_EQ(Serial(12345), number_low);
+    EXPECT_NE(Serial(12346), number_low);
+}
+
+TEST_F(SerialTest, comparison) {
+    // These should be true/false even without serial arithmetic
+    EXPECT_LE(one, one);
+    EXPECT_LE(one, one_2);
+    EXPECT_LT(one, two);
+    EXPECT_LE(one, two);
+    EXPECT_GE(two, two);
+    EXPECT_GT(two, one);
+    EXPECT_GE(two, one);
+    EXPECT_LT(one, number_low);
+    EXPECT_LT(number_low, number_medium);
+    EXPECT_LT(number_medium, number_high);
+
+    // now let's try some that 'wrap', as it were
+    EXPECT_GT(min, max);
+    EXPECT_LT(max, min);
+    EXPECT_LT(number_high, number_low);
+}
+
+//
+// RFC 1982 Section 3.1
+//
+TEST_F(SerialTest, addition) {
+    EXPECT_EQ(two, one + one);
+    EXPECT_EQ(two, one + one_2);
+    EXPECT_EQ(max, max + min);
+    EXPECT_EQ(min, max + one);
+    EXPECT_EQ(one, max + two);
+    EXPECT_EQ(one, max + one + one);
+
+    EXPECT_EQ(one + 100, max + 102);
+    EXPECT_EQ(min + 2147483645, max + 2147483646);
+    EXPECT_EQ(min + 2147483646, max + MAX_SERIAL_INCREMENT);
+}
+
+//
+// RFC 1982 Section 3.2 has been checked by the basic tests above
+//
+
+//
+// RFC 1982 Section 4.1
+//
+
+// Helper function for addition_always_larger test, add some numbers
+// and check that the result is always larger than the original
+void do_addition_larger_test(const Serial& number) {
+    EXPECT_GE(number + 0, number);
+    EXPECT_EQ(number + 0, number);
+    EXPECT_GT(number + 1, number);
+    EXPECT_GT(number + 2, number);
+    EXPECT_GT(number + 100, number);
+    EXPECT_GT(number + 1111111, number);
+    EXPECT_GT(number + 2147483646, number);
+    EXPECT_GT(number + MAX_SERIAL_INCREMENT, number);
+    // Try MAX_SERIAL_INCREMENT as a hardcoded number as well
+    EXPECT_GT(number + 2147483647, number);
+}
+
+TEST_F(SerialTest, addition_always_larger) {
+    do_addition_larger_test(one);
+    do_addition_larger_test(two);
+    do_addition_larger_test(date_zero);
+    do_addition_larger_test(date_one);
+    do_addition_larger_test(min);
+    do_addition_larger_test(max);
+    do_addition_larger_test(number_low);
+    do_addition_larger_test(number_medium);
+    do_addition_larger_test(number_high);
+}
+
+//
+// RFC 1982 Section 4.2
+//
+
+// Helper function to do the second addition
+void
+do_two_additions_test_second(const Serial &original,
+                             const Serial &number)
+{
+    EXPECT_NE(original, number);
+    EXPECT_NE(original, number + 0);
+    EXPECT_NE(original, number + 1);
+    EXPECT_NE(original, number + 2);
+    EXPECT_NE(original, number + 100);
+    EXPECT_NE(original, number + 1111111);
+    EXPECT_NE(original, number + 2147483646);
+    EXPECT_NE(original, number + MAX_SERIAL_INCREMENT);
+    EXPECT_NE(original, number + 2147483647);
+}
+
+void do_two_additions_test_first(const Serial &number) {
+    do_two_additions_test_second(number, number + 1);
+    do_two_additions_test_second(number, number + 2);
+    do_two_additions_test_second(number, number + 100);
+    do_two_additions_test_second(number, number + 1111111);
+    do_two_additions_test_second(number, number + 2147483646);
+    do_two_additions_test_second(number, number + MAX_SERIAL_INCREMENT);
+    do_two_additions_test_second(number, number + 2147483647);
+}
+
+TEST_F(SerialTest, two_additions_never_equal) {
+    do_two_additions_test_first(one);
+    do_two_additions_test_first(two);
+    do_two_additions_test_first(date_zero);
+    do_two_additions_test_first(date_one);
+    do_two_additions_test_first(min);
+    do_two_additions_test_first(max);
+    do_two_additions_test_first(number_low);
+    do_two_additions_test_first(number_medium);
+    do_two_additions_test_first(number_high);
+}
+
+//
+// RFC 1982 Section 4.3 and 4.4 have nothing to test
+//
+
+//
+// Tests from RFC 1982 examples
+//
+TEST(SerialTestRFCExamples, rfc_example_tests) {
+}
diff --git a/src/lib/log/Makefile.am b/src/lib/log/Makefile.am
index 957d350..286e9fd 100644
--- a/src/lib/log/Makefile.am
+++ b/src/lib/log/Makefile.am
@@ -46,5 +46,4 @@ if USE_CLANGPP
 liblog_la_CXXFLAGS += -Wno-error
 endif
 liblog_la_CPPFLAGS = $(AM_CPPFLAGS) $(LOG4CPLUS_INCLUDES)
-liblog_la_LDFLAGS  = $(LOG4CPLUS_LDFLAGS)
-liblog_la_LIBADD   = $(top_builddir)/src/lib/util/libutil.la
+liblog_la_LIBADD   = $(LOG4CPLUS_LIBS) $(top_builddir)/src/lib/util/libutil.la
diff --git a/src/lib/log/tests/Makefile.am b/src/lib/log/tests/Makefile.am
index a5f793c..53e97a1 100644
--- a/src/lib/log/tests/Makefile.am
+++ b/src/lib/log/tests/Makefile.am
@@ -48,16 +48,18 @@ endif
 noinst_PROGRAMS = logger_example
 logger_example_SOURCES = logger_example.cc
 logger_example_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-logger_example_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-logger_example_LDADD  = $(top_builddir)/src/lib/log/liblog.la
+logger_example_LDFLAGS = $(AM_LDFLAGS)
+logger_example_LDADD  = $(LOG4CPLUS_LIBS)
+logger_example_LDADD += $(top_builddir)/src/lib/log/liblog.la
 logger_example_LDADD += $(top_builddir)/src/lib/util/libutil.la
 logger_example_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
 noinst_PROGRAMS += init_logger_test
 init_logger_test_SOURCES = init_logger_test.cc
 init_logger_test_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
-init_logger_test_LDFLAGS = $(AM_LDFLAGS) $(LOG4CPLUS_LDFLAGS)
-init_logger_test_LDADD  = $(top_builddir)/src/lib/log/liblog.la
+init_logger_test_LDFLAGS = $(AM_LDFLAGS)
+init_logger_test_LDADD  = $(LOG4CPLUS_LIBS)
+init_logger_test_LDADD += $(top_builddir)/src/lib/log/liblog.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/util/libutil.la
 init_logger_test_LDADD += $(top_builddir)/src/lib/exceptions/libexceptions.la
 
diff --git a/src/lib/python/isc/bind10/Makefile.am b/src/lib/python/isc/bind10/Makefile.am
index c0f1e32..aa5d0ab 100644
--- a/src/lib/python/isc/bind10/Makefile.am
+++ b/src/lib/python/isc/bind10/Makefile.am
@@ -1,4 +1,5 @@
 SUBDIRS = . tests
 
-python_PYTHON = __init__.py sockcreator.py component.py special_component.py
+python_PYTHON = __init__.py sockcreator.py component.py special_component.py \
+		socket_cache.py
 pythondir = $(pyexecdir)/isc/bind10
diff --git a/src/lib/python/isc/bind10/socket_cache.py b/src/lib/python/isc/bind10/socket_cache.py
new file mode 100644
index 0000000..26e87d2
--- /dev/null
+++ b/src/lib/python/isc/bind10/socket_cache.py
@@ -0,0 +1,302 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+"""
+Here's the cache for sockets from the socket creator.
+"""
+
+import os
+import random
+import isc.bind10.sockcreator
+from copy import copy
+
+class SocketError(Exception):
+    """
+    Exception raised when the socket creator is unable to create the
+    requested socket. Possible reasons include: the address it should be
+    bound to is already taken, the permissions are insufficient, the
+    address family is not supported on this computer, and many more.
+
+    The errno, if not None, is passed from the socket creator.
+    """
+    def __init__(self, message, errno):
+        Exception.__init__(self, message)
+        self.errno = errno
+
+class ShareError(Exception):
+    """
+    The requested socket is already taken by another component and the sharing
+    parameters don't allow sharing with the new request.
+    """
+    pass
+
+class Socket:
+    """
+    This represents one socket cached by the cache program. It should never
+    be used directly by a user; it is used internally by the Cache. Therefore
+    many member variables are accessed directly instead of through accessors.
+
+    Be warned that this object implements the __del__ method, which closes
+    the socket it holds. This poses various problems with the garbage
+    collector. In short, do not create reference cycles involving this object
+    and generally leave this class alone to live peacefully.
+    """
+    def __init__(self, protocol, address, port, fileno):
+        """
+        Creates the socket.
+
+        The protocol, address and port are preserved for later reference.
+        """
+        self.protocol = protocol
+        self.address = address
+        self.port = port
+        self.fileno = fileno
+        # Mapping from token -> application
+        self.active_tokens = {}
+        # The tokens which were not yet picked up
+        self.waiting_tokens = set()
+        # Share modes and names by the tokens (token -> (mode, name))
+        self.shares = {}
+
+    def __del__(self):
+        """
+        Closes the file descriptor.
+        """
+        os.close(self.fileno)
+
+    def share_compatible(self, mode, name):
+        """
+        Checks if the given share mode and name are compatible with the ones
+        already installed here.
+
+        The allowed values for mode are listed in the Cache.get_token
+        function.
+        """
+        if mode not in ['NO', 'SAMEAPP', 'ANY']:
+            raise ValueError("Mode " + mode + " is invalid")
+
+        # Go through the existing ones
+        for (emode, ename) in self.shares.values():
+            if emode == 'NO' or mode == 'NO':
+                # One of them can't live together with anything
+                return False
+            if (emode == 'SAMEAPP' or mode == 'SAMEAPP') and \
+                ename != name:
+                # One of them can't live together with someone of different
+                # name
+                return False
+            # else both are ANY or SAMEAPP with the same name, which is OK
+        # No problem found, so we consider it OK
+        return True
+
+class Cache:
+    """
+    This is the cache for sockets from the socket creator. The purpose of the
+    cache is to hold the sockets that were requested until they are no longer
+    needed. One reason is that the socket is created before it is sent over
+    the unix domain socket in boss, so we need to keep it somewhere for a while.
+
+    The other reason is that a single socket might be requested multiple
+    times, so we keep it here in case someone else asks for it.
+
+    Each socket kept here has a reference count and when it drops to zero,
+    it is removed from cache and closed.
+
+    This is expected to be part of Boss; it is not a general utility class.
+
+    It is not expected to be subclassed. The methods and members are named
+    as protected so that tests can access them more easily.
+    """
+    def __init__(self, creator):
+        """
+        Initialization. The creator is the socket creator object
+        (isc.bind10.sockcreator.Creator) which will be used to create sockets
+        that are not yet cached.
+        """
+        self._creator = creator
+        # The sockets we have live here; these dicts are various ways of
+        # getting at them. Each of them contains the Socket objects somehow.
+
+        # This one is dict of token: socket for the ones that were not yet
+        # picked up by an application.
+        self._waiting_tokens = {}
+        # This format is the same as above, but for the tokens that were
+        # already picked up by the application and not yet released.
+        self._active_tokens = {}
+        # This is a dict from applications to set of tokens used by the
+        # application, for the sockets already picked up by an application
+        self._active_apps = {}
+        # The sockets live here to be indexed by protocol, address and
+        # subsequently by port
+        self._sockets = {}
+        # These are just the tokens actually in use, so we don't generate
+        # dupes. If one is dropped, it can be potentially reclaimed.
+        self._live_tokens = set()
+
+    def get_token(self, protocol, address, port, share_mode, share_name):
+        """
+        This requests a token representing a socket. The socket is either
+        found in the cache already or requested from the creator at this time
+        (and cached for later use).
+
+        The parameters are:
+        - protocol: either 'UDP' or 'TCP'
+        - address: the IPAddr object representing the address to bind to
+        - port: integer saying which port to bind to
+        - share_mode: either 'NO', 'SAMEAPP' or 'ANY', specifying how the
+          socket can be shared with others. See bin/bind10/creatorapi.txt
+          for details.
+        - share_name: the name of application, in case of 'SAMEAPP' share
+          mode. Only requests with the same name can share the socket.
+
+        If the call is successful, it returns a string token which can be
+        used to pick up the socket later. The socket is created with reference
+        count zero, and if it isn't picked up soon enough (the timeout has yet
+        to be set), it will be removed and the token becomes invalid.
+
+        It can fail in various ways. Explicitly listed exceptions are:
+        - SocketError: this one is thrown if the socket creator couldn't provide
+          the socket and it is not yet cached (it belongs to another
+          application, for example).
+        - ShareError: the socket is already in the cache, but it can't be
+          shared due to the share_mode and share_name combination (both the
+          restrictions of this request and of all copies of the socket already
+          handed out are considered, so it can be raised even with 'ANY').
+        - isc.bind10.sockcreator.CreatorError: fatal creator errors are
+          propagated. They should cause the boss to exit if ever encountered.
+
+        Note that the tokens are not guaranteed to be unique and they
+        should be used as opaque handles only.
+        """
+        addr_str = str(address)
+        try:
+            socket = self._sockets[protocol][addr_str][port]
+        except KeyError:
+            # Something in the dicts is not there, so socket is to be
+            # created
+            try:
+                fileno = self._creator.get_socket(address, port, protocol)
+            except isc.bind10.sockcreator.CreatorError as ce:
+                if ce.fatal:
+                    raise
+                else:
+                    raise SocketError(str(ce), ce.errno)
+            socket = Socket(protocol, address, port, fileno)
+            # And cache it
+            if protocol not in self._sockets:
+                self._sockets[protocol] = {}
+            if addr_str not in self._sockets[protocol]:
+                self._sockets[protocol][addr_str] = {}
+            self._sockets[protocol][addr_str][port] = socket
+        # Now we get the token, check it is compatible
+        if not socket.share_compatible(share_mode, share_name):
+            raise ShareError("Cached socket not compatible with mode " +
+                             share_mode + " and name " + share_name)
+        # Grab a yet unused token
+        token = 't' + str(random.randint(0, 2**32 - 1))
+        while token in self._live_tokens:
+            token = 't' + str(random.randint(0, 2**32 - 1))
+        self._waiting_tokens[token] = socket
+        self._live_tokens.add(token)
+        socket.shares[token] = (share_mode, share_name)
+        socket.waiting_tokens.add(token)
+        return token
+
+    def get_socket(self, token, application):
+        """
+        This returns the socket created by get_token. The token should be the
+        one returned from previous call from get_token. The token can be used
+        only once to receive the socket.
+
+        The application is a token representing the application that requested
+        it. Currently, boss uses the file descriptor of the connection from the
+        application, but anything which can be a key in a dict is OK from the
+        cache's point of view. You just need to use the same thing in
+        drop_application.
+
+        In case the token is considered invalid (it doesn't come from
+        get_token, it was already used, the socket wasn't picked up soon
+        enough, ...), it raises ValueError.
+        """
+        try:
+            socket = self._waiting_tokens[token]
+        except KeyError:
+            raise ValueError("Token " + token +
+                             " isn't waiting to be picked up")
+        del self._waiting_tokens[token]
+        self._active_tokens[token] = socket
+        if application not in self._active_apps:
+            self._active_apps[application] = set()
+        self._active_apps[application].add(token)
+        socket.waiting_tokens.remove(token)
+        socket.active_tokens[token] = application
+        return socket.fileno
+
+    def drop_socket(self, token):
+        """
+        This signals that the application no longer uses the socket which was
+        requested by the given token. It decreases the reference count for
+        the socket and closes and removes the cached copy if it was the last
+        one.
+
+        It raises ValueError if the token doesn't exist.
+        """
+        try:
+            socket = self._active_tokens[token]
+        except KeyError:
+            raise ValueError("Token " + token + " doesn't represent an " +
+                             "active socket")
+        # Now, remove everything from the bookkeeping
+        del socket.shares[token]
+        app = socket.active_tokens[token]
+        del socket.active_tokens[token]
+        del self._active_tokens[token]
+        self._active_apps[app].remove(token)
+        if len(self._active_apps[app]) == 0:
+            del self._active_apps[app]
+        self._live_tokens.remove(token)
+        # The socket is not used by anything now, so remove it
+        if len(socket.active_tokens) == 0 and len(socket.waiting_tokens) == 0:
+            addr = str(socket.address)
+            port = socket.port
+            proto = socket.protocol
+            del self._sockets[proto][addr][port]
+            # Clean up empty branches of the structure
+            if len(self._sockets[proto][addr]) == 0:
+                del self._sockets[proto][addr]
+            if len(self._sockets[proto]) == 0:
+                del self._sockets[proto]
+
+    def drop_application(self, application):
+        """
+        This signals that the application has terminated and all the sockets
+        it picked up should now be considered unused. It effectively calls
+        drop_socket on each of the sockets it picked up and didn't drop yet.
+
+        If the application is invalid (no get_socket was successful with this
+        value of application), it raises ValueError.
+        """
+        try:
+            # Get a copy. Deleting from a set while iterating over it is
+            # not safe, so we'll just make our own copy and iterate over
+            # that.
+            to_drop = copy(self._active_apps[application])
+        except KeyError:
+            raise ValueError("Application " + str(application) +
+                             " doesn't hold any sockets")
+        for token in to_drop:
+            self.drop_socket(token)
+        # We don't call del now. The last drop_socket should have
+        # removed the application key as well.
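
The docstrings above describe the intended life cycle: get_token(), then
get_socket(), then drop_socket() or drop_application(). The following sketch
shows that flow end to end; it is illustrative only and not part of the patch,
and FakeCreator is a hypothetical stand-in for isc.bind10.sockcreator.Creator
so the sequence can be shown without a running socket creator:

    import os
    from isc.net.addr import IPAddr
    import isc.bind10.socket_cache

    class FakeCreator:
        # Hypothetical stand-in for isc.bind10.sockcreator.Creator; it hands
        # out a dummy file descriptor instead of a real bound socket.
        def get_socket(self, address, port, protocol):
            return os.open(os.devnull, os.O_RDONLY)

    cache = isc.bind10.socket_cache.Cache(FakeCreator())

    # Request a shareable UDP socket; this either reuses a cached Socket or
    # asks the creator for a new file descriptor.
    token = cache.get_token('UDP', IPAddr('192.0.2.1'), 53, 'SAMEAPP', 'Auth')

    # The application (identified here by its connection fd, e.g. 7) picks
    # the socket up exactly once using the token.
    fileno = cache.get_socket(token, 7)

    # When the application no longer needs it, the reference is dropped;
    # once the last reference is gone, the cached Socket is closed.
    cache.drop_socket(token)
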
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index 9972200..c9c7683 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -42,6 +42,7 @@ class SockCreator(BaseComponent):
         self.__creator = isc.bind10.sockcreator.Creator(LIBEXECDIR + ':' +
                                                         os.environ['PATH'])
         self._boss.register_process(self.pid(), self)
+        self._boss.set_creator(self.__creator)
         self._boss.log_started(self.pid())
 
     def _stop_internal(self):
@@ -108,16 +109,6 @@ class CmdCtl(Component):
         Component.__init__(self, process, boss, kind, 'Cmdctl', None,
                            boss.start_cmdctl)
 
-class XfrIn(Component):
-    def __init__(self, process, boss, kind, address=None, params=None):
-        Component.__init__(self, process, boss, kind, 'Xfrin', None,
-                           boss.start_xfrin)
-
-class XfrOut(Component):
-    def __init__(self, process, boss, kind, address=None, params=None):
-        Component.__init__(self, process, boss, kind, 'Xfrout', None,
-                           boss.start_xfrout)
-
 class SetUID(BaseComponent):
     """
     This is a pseudo-component which drops root privileges when started
@@ -157,9 +148,6 @@ def get_specials():
         'auth': Auth,
         'resolver': Resolver,
         'cmdctl': CmdCtl,
-        # FIXME: Temporary workaround before #1292 is done
-        'xfrin': XfrIn,
-        'xfrout': XfrOut,
         # TODO: Remove when not needed, workaround before sockcreator works
         'setuid': SetUID
     }
diff --git a/src/lib/python/isc/bind10/tests/Makefile.am b/src/lib/python/isc/bind10/tests/Makefile.am
index df625b2..658db1e 100644
--- a/src/lib/python/isc/bind10/tests/Makefile.am
+++ b/src/lib/python/isc/bind10/tests/Makefile.am
@@ -1,7 +1,7 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 #PYTESTS = args_test.py bind10_test.py
 # NOTE: this has a generated test found in the builddir
-PYTESTS = sockcreator_test.py component_test.py
+PYTESTS = sockcreator_test.py component_test.py socket_cache_test.py
 
 EXTRA_DIST = $(PYTESTS)
 
diff --git a/src/lib/python/isc/bind10/tests/component_test.py b/src/lib/python/isc/bind10/tests/component_test.py
index 7e18e60..3b49b18 100644
--- a/src/lib/python/isc/bind10/tests/component_test.py
+++ b/src/lib/python/isc/bind10/tests/component_test.py
@@ -86,9 +86,6 @@ class BossUtils:
     def start_cmdctl(self):
         pass
 
-    def start_xfrin(self):
-        pass
-
 class ComponentTests(BossUtils, unittest.TestCase):
     """
     Tests for the bind10.component.Component class
@@ -511,7 +508,6 @@ class ComponentTests(BossUtils, unittest.TestCase):
                                isc.bind10.special_component.Auth,
                                isc.bind10.special_component.Resolver,
                                isc.bind10.special_component.CmdCtl,
-                               isc.bind10.special_component.XfrIn,
                                isc.bind10.special_component.SetUID]:
             component = component_type('none', self, 'needed')
             self.assertIsNone(component.pid())
diff --git a/src/lib/python/isc/bind10/tests/sockcreator_test.py b/src/lib/python/isc/bind10/tests/sockcreator_test.py
index 4453184..d97d21b 100644
--- a/src/lib/python/isc/bind10/tests/sockcreator_test.py
+++ b/src/lib/python/isc/bind10/tests/sockcreator_test.py
@@ -13,9 +13,6 @@
 # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 # WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 
-# This test file is generated .py.in -> .py just to be in the build dir,
-# same as the rest of the tests. Saves a lot of stuff in makefile.
-
 """
 Tests for the bind10.sockcreator module.
 """
diff --git a/src/lib/python/isc/bind10/tests/socket_cache_test.py b/src/lib/python/isc/bind10/tests/socket_cache_test.py
new file mode 100644
index 0000000..bbbf776
--- /dev/null
+++ b/src/lib/python/isc/bind10/tests/socket_cache_test.py
@@ -0,0 +1,396 @@
+# Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
+# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
+# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
+# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import unittest
+import isc.log
+import isc.bind10.socket_cache
+import isc.bind10.sockcreator
+from isc.net.addr import IPAddr
+import os
+
+class Test(unittest.TestCase):
+    """
+    Base for the tests here. It replaces the os.close method.
+    """
+    def setUp(self):
+        self._closes = []
+        isc.bind10.socket_cache.os.close = self.__close
+
+    def tearDown(self):
+        # This is not a very clean solution. But once the test ceases
+        # to exist, the method must no longer be used to destroy the
+        # objects. And we can't restore the real os.close here, as we
+        # never work with real sockets in these tests.
+        isc.bind10.socket_cache.os.close = lambda fd: None
+
+    def __close(self, fd):
+        """
+        Just log that a close was called.
+        """
+        self._closes.append(fd)
+
+class SocketTest(Test):
+    """
+    Test for the Socket class.
+    """
+    def setUp(self):
+        """
+        Creates the socket to be tested.
+
+        It also creates other useful test variables.
+        """
+        Test.setUp(self)
+        self.__address = IPAddr("192.0.2.1")
+        self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+                                                       1024, 42)
+
+    def test_init(self):
+        """
+        Checks the internals of the socket just after the creation.
+        """
+        self.assertEqual('UDP', self.__socket.protocol)
+        self.assertEqual(self.__address, self.__socket.address)
+        self.assertEqual(1024, self.__socket.port)
+        self.assertEqual(42, self.__socket.fileno)
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual({}, self.__socket.shares)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+
+    def test_del(self):
+        """
+        Check it closes the socket when removed.
+        """
+        # This should make the refcount 0 and call the destructor
+        # right away
+        self.__socket = None
+        self.assertEqual([42], self._closes)
+
+    def test_share_modes(self):
+        """
+        Test the share mode compatibility check function.
+        """
+        modes = ['NO', 'SAMEAPP', 'ANY']
+        # If there are no shares, it is compatible with everything.
+        for mode in modes:
+            self.assertTrue(self.__socket.share_compatible(mode, 'anything'))
+
+        # There's a 'NO' already, so it is incompatible with everything.
+        self.__socket.shares = {'token': ('NO', 'anything')}
+        for mode in modes:
+            self.assertFalse(self.__socket.share_compatible(mode, 'anything'))
+
+        # If there's SAMEAPP, it is compatible with ANY and SAMEAPP with the
+        # same name.
+        self.__socket.shares = {'token': ('SAMEAPP', 'app')}
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+        self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+
+        # If there's ANY, then ANY and SAMEAPP with the same name are compatible
+        self.__socket.shares = {'token': ('ANY', 'app')}
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'something'))
+
+        # In case there are multiple already inside
+        self.__socket.shares = {
+            'token': ('ANY', 'app'),
+            'another': ('SAMEAPP', 'app')
+        }
+        self.assertFalse(self.__socket.share_compatible('NO', 'app'))
+        self.assertFalse(self.__socket.share_compatible('SAMEAPP',
+                                                        'something'))
+        self.assertTrue(self.__socket.share_compatible('SAMEAPP', 'app'))
+        self.assertFalse(self.__socket.share_compatible('ANY', 'something'))
+        self.assertTrue(self.__socket.share_compatible('ANY', 'app'))
+
+        # Invalid inputs are rejected
+        self.assertRaises(ValueError, self.__socket.share_compatible, 'bad',
+                          'bad')
+
+class SocketCacheTest(Test):
+    """
+    Some tests for the isc.bind10.socket_cache.Cache.
+
+    This class, as well as being the testcase, pretends to be the
+    socket creator so it can hijack all the requests for sockets.
+    """
+    def setUp(self):
+        """
+        Creates the cache for tests with us being the socket creator.
+
+        Also creates some more variables for testing.
+        """
+        Test.setUp(self)
+        self.__cache = isc.bind10.socket_cache.Cache(self)
+        self.__address = IPAddr("192.0.2.1")
+        self.__socket = isc.bind10.socket_cache.Socket('UDP', self.__address,
+                                                       1024, 42)
+        self.__get_socket_called = False
+
+    def test_init(self):
+        """
+        Checks the internals of the cache just after the creation.
+        """
+        self.assertEqual(self, self.__cache._creator)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(), self.__cache._live_tokens)
+
+    def get_socket(self, address, port, socktype):
+        """
+        Pretend to be a socket creator.
+
+        This expects to be called with the test's address, port 1024 and 'UDP'.
+
+        Returns 42 and notes down that it was called.
+        """
+        self.assertEqual(self.__address, address)
+        self.assertEqual(1024, port)
+        self.assertEqual('UDP', socktype)
+        self.__get_socket_called = True
+        return 42
+
+    def test_get_token_cached(self):
+        """
+        Check the behaviour of get_token when the requested socket is already
+        cached inside.
+        """
+        self.__cache._sockets = {
+            'UDP': {'192.0.2.1': {1024: self.__socket}}
+        }
+        token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+                                       'test')
+        # It didn't call get_socket
+        self.assertFalse(self.__get_socket_called)
+        # It returned something
+        self.assertIsNotNone(token)
+        # The token is both in the waiting sockets and the live tokens
+        self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        # The token got the new share to block any relevant queries
+        self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+        # The socket knows the token is waiting in it
+        self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+        # If we request one more, with incompatible share, it is rejected
+        self.assertRaises(isc.bind10.socket_cache.ShareError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+        # The internals are not changed, so the same checks apply
+        self.assertEqual({token: self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        self.assertEqual({token: ('ANY', 'test')}, self.__socket.shares)
+        self.assertEqual(set([token]), self.__socket.waiting_tokens)
+
+    def test_get_token_uncached(self):
+        """
+        Check a new socket is created when a corresponding one is missing.
+        """
+        token = self.__cache.get_token('UDP', self.__address, 1024, 'ANY',
+                                       'test')
+        # The get_socket was called
+        self.assertTrue(self.__get_socket_called)
+        # It returned something
+        self.assertIsNotNone(token)
+        # Get the socket and check it looks OK
+        socket = self.__cache._waiting_tokens[token]
+        self.assertEqual(self.__address, socket.address)
+        self.assertEqual(1024, socket.port)
+        self.assertEqual(42, socket.fileno)
+        self.assertEqual('UDP', socket.protocol)
+        # The socket is properly cached
+        self.assertEqual({
+            'UDP': {'192.0.2.1': {1024: socket}}
+        }, self.__cache._sockets)
+        # The token is both in the waiting sockets and the live tokens
+        self.assertEqual({token: socket}, self.__cache._waiting_tokens)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        # The token got the new share to block any relevant queries
+        self.assertEqual({token: ('ANY', 'test')}, socket.shares)
+        # The socket knows the token is waiting in it
+        self.assertEqual(set([token]), socket.waiting_tokens)
+
+    def test_get_token_excs(self):
+        """
+        Test that it is handled properly if the socket creator raises
+        some exceptions.
+        """
+        def raiseCreatorError(fatal):
+            raise isc.bind10.sockcreator.CreatorError('test error', fatal)
+        # First, fatal socket creator errors are passed through
+        self.get_socket = lambda addr, port, proto: raiseCreatorError(True)
+        self.assertRaises(isc.bind10.sockcreator.CreatorError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+        # And nonfatal are converted to SocketError
+        self.get_socket = lambda addr, port, proto: raiseCreatorError(False)
+        self.assertRaises(isc.bind10.socket_cache.SocketError,
+                          self.__cache.get_token, 'UDP', self.__address, 1024,
+                          'NO', 'test')
+
+    def test_get_socket(self):
+        """
+        Test that we can pick up a socket if we know a token.
+        """
+        token = "token"
+        app = 13
+        # No socket prepared there
+        self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+        # Not changed
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(), self.__cache._live_tokens)
+        # Prepare a token there
+        self.__socket.waiting_tokens = set([token])
+        self.__socket.shares = {token: ('ANY', 'app')}
+        self.__cache._waiting_tokens = {token: self.__socket}
+        self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+        self.__cache._live_tokens = set([token])
+        socket = self.__cache.get_socket(token, app)
+        # Received the fileno
+        self.assertEqual(42, socket)
+        # It moved from waiting to active ones
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({token: self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({13: set([token])}, self.__cache._active_apps)
+        self.assertEqual(set([token]), self.__cache._live_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({token: 13}, self.__socket.active_tokens)
+        # Trying to get it again fails
+        self.assertRaises(ValueError, self.__cache.get_socket, token, app)
+
+    def test_drop_application(self):
+        """
+        Test that drop_application calls drop_socket on all the sockets
+        held by the application.
+        """
+        sockets = set()
+        def drop_socket(token):
+            sockets.add(token)
+        # Mock the drop_socket so we know it is called
+        self.__cache.drop_socket = drop_socket
+        self.assertRaises(ValueError, self.__cache.drop_application,
+                          13)
+        self.assertEqual(set(), sockets)
+        # Put the tokens into active_apps. Nothing else should be touched
+        # by this call, so leave it alone.
+        self.__cache._active_apps = {
+            1: set(['t1', 't2']),
+            2: set(['t3'])
+        }
+        self.__cache.drop_application(1)
+        # We don't check _active_apps here; it would normally be cleaned up
+        # by drop_socket, which we have mocked out above.
+        self.assertEqual(set(['t1', 't2']), sockets)
+
+    def test_drop_socket(self):
+        """
+        Test the drop_socket call. It tests:
+        * That a socket that still has something to keep it alive is left alive
+          (both waiting and active).
+        * If not, it is deleted.
+        * That all bookkeeping data around it are properly removed.
+        * And, of course, the ValueError raised for an invalid token.
+        """
+        self.assertRaises(ValueError, self.__cache.drop_socket, "bad token")
+        self.__socket.active_tokens = {'t1': 1}
+        self.__socket.waiting_tokens = set(['t2'])
+        self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+        self.__cache._waiting_tokens = {'t2': self.__socket}
+        self.__cache._active_tokens = {'t1': self.__socket}
+        self.__cache._sockets = {'UDP': {'192.0.2.1': {1024: self.__socket}}}
+        self.__cache._live_tokens = set(['t1', 't2'])
+        self.__cache._active_apps = {1: set(['t1'])}
+        # We can't drop what wasn't picked up yet
+        self.assertRaises(ValueError, self.__cache.drop_socket, 't2')
+        self.assertEqual({'t1': 1}, self.__socket.active_tokens)
+        self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+        self.assertEqual({'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')},
+                         self.__socket.shares)
+        self.assertEqual({'t2': self.__socket}, self.__cache._waiting_tokens)
+        self.assertEqual({'t1': self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t1', 't2']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t1'])}, self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # If we drop this one, the socket survives because another token is
+        # still waiting to be picked up
+        self.__cache.drop_socket('t1')
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual(set(['t2']), self.__socket.waiting_tokens)
+        self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t2']), self.__cache._live_tokens)
+        self.assertEqual({}, self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # Fill it again, now with two applications holding the same socket
+        self.__socket.active_tokens = {'t1': 1, 't2': 2}
+        self.__socket.waiting_tokens = set()
+        self.__socket.shares = {'t1': ('ANY', 'app1'), 't2': ('ANY', 'app2')}
+        self.__cache._waiting_tokens = {}
+        self.__cache._active_tokens = {
+            't1': self.__socket,
+            't2': self.__socket
+        }
+        self.__cache._live_tokens = set(['t1', 't2', 't3'])
+        self.assertEqual([], self._closes)
+        # We cheat here a little bit: t3 doesn't exist anywhere else, but we
+        # need to check the app isn't removed too soon. It shouldn't matter
+        # anywhere else, so we just avoid the tiresome filling in.
+        self.__cache._active_apps = {1: set(['t1', 't3']), 2: set(['t2'])}
+        # Drop it as t1. It should still live.
+        self.__cache.drop_socket('t1')
+        self.assertEqual({'t2': 2}, self.__socket.active_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({'t2': ('ANY', 'app2')}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({'t2': self.__socket}, self.__cache._active_tokens)
+        self.assertEqual({'UDP': {'192.0.2.1': {1024: self.__socket}}},
+                         self.__cache._sockets)
+        self.assertEqual(set(['t3', 't2']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t3']), 2: set(['t2'])},
+                         self.__cache._active_apps)
+        self.assertEqual([], self._closes)
+        # Drop it again, from the other application. It should get removed
+        # and closed.
+        self.__cache.drop_socket('t2')
+        self.assertEqual({}, self.__socket.active_tokens)
+        self.assertEqual(set(), self.__socket.waiting_tokens)
+        self.assertEqual({}, self.__socket.shares)
+        self.assertEqual({}, self.__cache._waiting_tokens)
+        self.assertEqual({}, self.__cache._active_tokens)
+        self.assertEqual({}, self.__cache._sockets)
+        self.assertEqual(set(['t3']), self.__cache._live_tokens)
+        self.assertEqual({1: set(['t3'])}, self.__cache._active_apps)
+        # The cache doesn't hold the socket. So when we remove it ourselves,
+        # it should get closed.
+        self.__socket = None
+        self.assertEqual([42], self._closes)
+
+if __name__ == '__main__':
+    isc.log.init("bind10")
+    isc.log.resetUnitTestRootLogger()
+    unittest.main()
diff --git a/src/lib/python/isc/datasrc/Makefile.am b/src/lib/python/isc/datasrc/Makefile.am
index fb6d151..47f3dbc 100644
--- a/src/lib/python/isc/datasrc/Makefile.am
+++ b/src/lib/python/isc/datasrc/Makefile.am
@@ -24,6 +24,7 @@ datasrc_la_CXXFLAGS = $(AM_CXXFLAGS) $(PYTHON_CXXFLAGS)
 datasrc_la_LDFLAGS = $(PYTHON_LDFLAGS)
 datasrc_la_LDFLAGS += -module
 datasrc_la_LIBADD = $(top_builddir)/src/lib/datasrc/libdatasrc.la
+datasrc_la_LIBADD += $(top_builddir)/src/lib/cc/libcc.la
 datasrc_la_LIBADD += $(top_builddir)/src/lib/dns/python/libpydnspp.la
 datasrc_la_LIBADD += $(PYTHON_LIB)
 
diff --git a/src/lib/python/isc/datasrc/finder_inc.cc b/src/lib/python/isc/datasrc/finder_inc.cc
index 4a00e78..82c5fdc 100644
--- a/src/lib/python/isc/datasrc/finder_inc.cc
+++ b/src/lib/python/isc/datasrc/finder_inc.cc
@@ -46,6 +46,7 @@ Return the RR class of the zone.\n\
 // - Return type: use tuple instead of the dedicated FindResult type
 // - NULL->None
 // - exceptions
+// - description of the 'target' parameter (must be None for now)
 const char* const ZoneFinder_find_doc = "\
 find(name, type, target=None, options=FIND_DEFAULT) -> (integer, RRset)\n\
 \n\
@@ -74,6 +75,7 @@ answer for the search key. Specifically,\n\
 - If the target isn't None, all RRsets under the domain are inserted\n\
   there and SUCCESS (or NXDOMAIN, in case of empty domain) is returned\n\
   instead of normal processing. This is intended to handle ANY query.\n\
+  (Note: the Python version doesn't support this feature yet)\n\
 \n\
 Note: This behavior is controversial as we discussed in\n\
 https://lists.isc.org/pipermail/bind10-dev/2011-January/001918.html We\n\
@@ -105,8 +107,7 @@ internal error in the datasource.\n\
 Parameters:\n\
   name       The domain name to be searched for.\n\
   type       The RR type to be searched for.\n\
-  target     If target is not None, insert all RRs under the domain\n\
-             into it.\n\
+  target     Must be None.\n\
   options    The search options.\n\
 \n\
 Return Value(s): A tuple of a result code (integer) and an RRset object\n\
diff --git a/src/lib/python/isc/datasrc/finder_python.cc b/src/lib/python/isc/datasrc/finder_python.cc
index 6585049..7f74133 100644
--- a/src/lib/python/isc/datasrc/finder_python.cc
+++ b/src/lib/python/isc/datasrc/finder_python.cc
@@ -53,26 +53,29 @@ namespace isc_datasrc_internal {
 PyObject* ZoneFinder_helper(ZoneFinder* finder, PyObject* args) {
     if (finder == NULL) {
         PyErr_SetString(getDataSourceException("Error"),
-                        "Internal error in find() wrapper; finder object NULL");
+                        "Internal error in find() wrapper; "
+                        "finder object NULL");
         return (NULL);
     }
-    PyObject *name;
-    PyObject *rrtype;
-    PyObject *target;
-    int options_int;
-    if (PyArg_ParseTuple(args, "O!O!OI", &name_type, &name,
+    PyObject* name;
+    PyObject* rrtype;
+    PyObject* target = Py_None;
+    unsigned int options_int = ZoneFinder::FIND_DEFAULT;
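+    // In the format string below, '|' marks the remaining arguments
+    // (target and options) as optional; the defaults set above are used
+    // when they are omitted.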
+    if (PyArg_ParseTuple(args, "O!O!|OI", &name_type, &name,
                                          &rrtype_type, &rrtype,
                                          &target, &options_int)) {
         try {
+            if (target != Py_None) {
+                PyErr_SetString(PyExc_TypeError,
+                                "find(): target must be None in this version");
+                return (NULL);
+            }
             ZoneFinder::FindOptions options =
                 static_cast<ZoneFinder::FindOptions>(options_int);
-            ZoneFinder::FindResult find_result(
-                finder->find(PyName_ToName(name),
-                                   PyRRType_ToRRType(rrtype),
-                                   NULL,
-                                   options
-                                   ));
-            ZoneFinder::Result r = find_result.code;
+            const ZoneFinder::FindResult find_result(
+                finder->find(PyName_ToName(name), PyRRType_ToRRType(rrtype),
+                             NULL, options));
+            const ZoneFinder::Result r = find_result.code;
             isc::dns::ConstRRsetPtr rrsp = find_result.rrset;
             if (rrsp) {
                 // Use N instead of O so the refcount isn't increased twice
diff --git a/src/lib/python/isc/datasrc/tests/Makefile.am b/src/lib/python/isc/datasrc/tests/Makefile.am
index 400abcf..ab89b93 100644
--- a/src/lib/python/isc/datasrc/tests/Makefile.am
+++ b/src/lib/python/isc/datasrc/tests/Makefile.am
@@ -34,5 +34,6 @@ endif
 	PYTHONPATH=:$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/python/isc/log:$(abs_top_builddir)/src/lib/python/isc/datasrc/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs \
 	TESTDATA_PATH=$(abs_srcdir)/testdata \
 	TESTDATA_WRITE_PATH=$(abs_builddir) \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index e46c177..3e4a1d7 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -285,6 +285,24 @@ class DataSrcClient(unittest.TestCase):
         self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
                          rrset.to_text())
 
+        # Check that the optional parameters can be omitted
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A())
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = finder.find(isc.dns.Name("www.example.com"),
+                                    isc.dns.RRType.A(), None)
+        self.assertEqual(finder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for the "target"
+        self.assertRaises(TypeError, finder.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), True)
+
         result, rrset = finder.find(isc.dns.Name("www.sql1.example.com"),
                                     isc.dns.RRType.A(),
                                     None,
@@ -385,6 +403,36 @@ class DataSrcUpdater(unittest.TestCase):
         # can't construct directly
         self.assertRaises(TypeError, isc.datasrc.ZoneUpdater)
 
+    def test_update_finder(self):
+        # Check basic behavior of updater's finder
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        updater = dsc.get_updater(isc.dns.Name("example.com"), False)
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(),
+                                     None,
+                                     ZoneFinder.FIND_DEFAULT)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Omit optional parameters
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A())
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        result, rrset = updater.find(isc.dns.Name("www.example.com"),
+                                     isc.dns.RRType.A(), None)
+        self.assertEqual(ZoneFinder.SUCCESS, result)
+        self.assertEqual("www.example.com. 3600 IN A 192.0.2.1\n",
+                         rrset.to_text())
+
+        # Invalid value for 'target'
+        self.assertRaises(TypeError, updater.find,
+                          isc.dns.Name("www.example.com"),
+                          isc.dns.RRType.A(), 1)
+
     def test_update_delete_commit(self):
 
         dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index af79b7c..64a4b3e 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -284,14 +284,12 @@ class NotifyOut:
                          format_zone_str(zone_name, zone_class))
             return []
 
-        result, ns_rrset = finder.find(zone_name, RRType.NS(), None,
-                                       finder.FIND_DEFAULT)
+        result, ns_rrset = finder.find(zone_name, RRType.NS())
         if result is not finder.SUCCESS or ns_rrset is None:
             logger.warn(NOTIFY_OUT_ZONE_NO_NS,
                         format_zone_str(zone_name, zone_class))
             return []
-        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
-                                        finder.FIND_DEFAULT)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
         if result is not finder.SUCCESS or soa_rrset is None or \
                 soa_rrset.get_rdata_count() != 1:
             logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
@@ -304,13 +302,11 @@ class NotifyOut:
             ns_name = Name(ns_rdata.to_text())
             if soa_mname == ns_name:
                 continue
-            result, rrset = finder.find(ns_name, RRType.A(), None,
-                                        finder.FIND_DEFAULT)
+            result, rrset = finder.find(ns_name, RRType.A())
             if result is finder.SUCCESS and rrset is not None:
                 addrs.extend([a.to_text() for a in rrset.get_rdata()])
 
-            result, rrset = finder.find(ns_name, RRType.AAAA(), None,
-                                        finder.FIND_DEFAULT)
+            result, rrset = finder.find(ns_name, RRType.AAAA())
             if result is finder.SUCCESS and rrset is not None:
                 addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
 
@@ -504,8 +500,7 @@ class NotifyOut:
                                            zone_name.to_text() + '/' +
                                            zone_class.to_text() + ' not found')
 
-        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
-                                        finder.FIND_DEFAULT)
+        result, soa_rrset = finder.find(zone_name, RRType.SOA())
         if result is not finder.SUCCESS or soa_rrset is None or \
                 soa_rrset.get_rdata_count() != 1:
             raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 6b62b90..3af5991 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -29,5 +29,6 @@ endif
 	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
+	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/testutils/rrset_utils.py b/src/lib/python/isc/testutils/rrset_utils.py
index 8c22d92..7eac772 100644
--- a/src/lib/python/isc/testutils/rrset_utils.py
+++ b/src/lib/python/isc/testutils/rrset_utils.py
@@ -53,6 +53,25 @@ def create_ns(nsname, name=Name('example.com'), ttl=3600):
     rrset.add_rdata(Rdata(RRType.NS(), RRClass.IN(), nsname))
     return rrset
 
+def create_cname(target='target.example.com', name=Name('example.com'),
+                 ttl=3600):
+    rrset = RRset(name, RRClass.IN(), RRType.CNAME(), RRTTL(ttl))
+    rrset.add_rdata(Rdata(RRType.CNAME(), RRClass.IN(), target))
+    return rrset
+
+def create_generic(name, rdlen, type=RRType('TYPE65300'), ttl=3600):
+    '''Create an RR of a general type with an arbitrary length of RDATA
+
+    If the RR type isn't specified, type 65300 will be used, which is
+    arbitrarily chosen from the IANA "Reserved for Private Use" range.
+    The RDATA will be filled with the specified length of all-zero data.
+
+    '''
+    rrset = RRset(name, RRClass.IN(), type, RRTTL(ttl))
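+    # The RDATA is given in the RFC 3597 generic form: '\# <length> <hex>'.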
+    rrset.add_rdata(Rdata(type, RRClass.IN(), '\\# ' +
+                          str(rdlen) + ' ' + '00' * rdlen))
+    return rrset
+
 def create_soa(serial, name=Name('example.com'), ttl=3600):
     '''For convenience we use a default name often used as a zone name'''
 
diff --git a/src/lib/xfr/Makefile.am b/src/lib/xfr/Makefile.am
index d714990..3d7f60f 100644
--- a/src/lib/xfr/Makefile.am
+++ b/src/lib/xfr/Makefile.am
@@ -1,3 +1,5 @@
+SUBDIRS = . tests
+
 AM_CPPFLAGS = -I$(top_srcdir)/src/lib -I$(top_builddir)/src/lib
 AM_CPPFLAGS += -I$(top_srcdir)/src/lib/dns -I$(top_builddir)/src/lib/dns
 AM_CPPFLAGS += $(BOOST_INCLUDES)
diff --git a/src/lib/xfr/tests/Makefile.am b/src/lib/xfr/tests/Makefile.am
new file mode 100644
index 0000000..4abb456
--- /dev/null
+++ b/src/lib/xfr/tests/Makefile.am
@@ -0,0 +1,25 @@
+AM_CPPFLAGS = -I$(top_builddir)/src/lib -I$(top_srcdir)/src/lib
+AM_CPPFLAGS += $(BOOST_INCLUDES)
+AM_CXXFLAGS = $(B10_CXXFLAGS)
+
+if USE_STATIC_LINK
+AM_LDFLAGS = -static
+endif
+
+CLEANFILES = *.gcno *.gcda
+
+TESTS =
+if HAVE_GTEST
+TESTS += run_unittests
+run_unittests_SOURCES = run_unittests.cc client_test.cc
+
+run_unittests_CPPFLAGS = $(AM_CPPFLAGS) $(GTEST_INCLUDES)
+run_unittests_LDFLAGS = $(AM_LDFLAGS) $(GTEST_LDFLAGS)
+
+run_unittests_LDADD = $(GTEST_LDADD)
+run_unittests_LDADD += $(top_builddir)/src/lib/util/unittests/libutil_unittests.la
+run_unittests_LDADD += $(top_builddir)/src/lib/xfr/libxfr.la
+run_unittests_LDADD += $(top_builddir)/src/lib/log/liblog.la
+endif
+
+noinst_PROGRAMS = $(TESTS)
diff --git a/src/lib/xfr/tests/client_test.cc b/src/lib/xfr/tests/client_test.cc
new file mode 100644
index 0000000..6c9f4ad
--- /dev/null
+++ b/src/lib/xfr/tests/client_test.cc
@@ -0,0 +1,37 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+
+#include <sys/un.h>
+#include <string>
+
+#include <xfr/xfrout_client.h>
+
+using namespace std;
+using namespace isc::xfr;
+
+namespace {
+
+TEST(ClientTest, connectFile) {
+    // File path is too long
+    struct sockaddr_un s;     // can't be const; some compilers complain
+    EXPECT_THROW(XfroutClient(string(sizeof(s.sun_path), 'x')).connect(),
+                 XfroutError);
+
+    // File doesn't exist (we assume the file "no_such_file" doesn't exist)
+    EXPECT_THROW(XfroutClient("no_such_file").connect(), XfroutError);
+}
+
+}
diff --git a/src/lib/xfr/tests/run_unittests.cc b/src/lib/xfr/tests/run_unittests.cc
new file mode 100644
index 0000000..8dc59a2
--- /dev/null
+++ b/src/lib/xfr/tests/run_unittests.cc
@@ -0,0 +1,24 @@
+// Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+//
+// Permission to use, copy, modify, and/or distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+// REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+// AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+// INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+// LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+// OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+// PERFORMANCE OF THIS SOFTWARE.
+
+#include <gtest/gtest.h>
+#include <log/logger_support.h>
+#include <util/unittests/run_all.h>
+
+int
+main(int argc, char* argv[]) {
+    ::testing::InitGoogleTest(&argc, argv);
+    isc::log::initLogger();
+    return (isc::util::unittests::run_all());
+}
diff --git a/src/lib/xfr/xfrout_client.cc b/src/lib/xfr/xfrout_client.cc
index 6ab905b..227ffc4 100644
--- a/src/lib/xfr/xfrout_client.cc
+++ b/src/lib/xfr/xfrout_client.cc
@@ -52,10 +52,11 @@ XfroutClient::~XfroutClient() {
 
 void
 XfroutClient::connect() {
-    asio::error_code err;
-    impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_), err);
-    if (err) {
-        isc_throw(XfroutError, "socket connect failed: " << err.message());
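+    // Without an error_code argument, asio reports a failed connect by
+    // throwing asio::system_error, which we convert to XfroutError here.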
+    try {
+        impl_->socket_.connect(stream_protocol::endpoint(impl_->file_path_));
+    } catch (const asio::system_error& err) {
+        isc_throw(XfroutError, "socket connect failed for " <<
+                  impl_->file_path_ << ": " << err.what());
     }
 }
 
diff --git a/tests/lettuce/features/terrain/bind10_control.py b/tests/lettuce/features/terrain/bind10_control.py
index fbbb3a2..5248316 100644
--- a/tests/lettuce/features/terrain/bind10_control.py
+++ b/tests/lettuce/features/terrain/bind10_control.py
@@ -137,5 +137,8 @@ def send_command(step, command, cmdctl_port):
                                subprocess.PIPE, None)
     bindctl.stdin.write(command + "\n")
     bindctl.stdin.write("quit\n")
-    result = bindctl.wait()
-    assert result == 0, "bindctl exit code: " + str(result)
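+    # communicate() collects the output (and avoids a possible deadlock on
+    # a full pipe) before we check the exit code below.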
+    (stdout, stderr) = bindctl.communicate()
+    result = bindctl.returncode
+    assert result == 0, "bindctl exit code: " + str(result) +\
+                        "\nstdout:\n" + str(stdout) +\
+                        "stderr:\n" + str(stderr)
diff --git a/tests/lettuce/features/xfrin_bind10.feature b/tests/lettuce/features/xfrin_bind10.feature
index 23b2eda..70c3571 100644
--- a/tests/lettuce/features/xfrin_bind10.feature
+++ b/tests/lettuce/features/xfrin_bind10.feature
@@ -5,6 +5,7 @@ Feature: Xfrin
     Given I have bind10 running with configuration xfrin/retransfer_master.conf with cmdctl port 47804 as master
     And I have bind10 running with configuration xfrin/retransfer_slave.conf
     A query for www.example.org should have rcode REFUSED
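+    # Make sure cmdctl is ready to accept commands before we send one below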
+    Wait for bind10 stderr message CMDCTL_STARTED
     When I send bind10 the command Xfrin retransfer example.org IN 127.0.0.1 47807
     Then wait for new bind10 stderr message XFRIN_XFR_TRANSFER_SUCCESS not XFRIN_XFR_PROCESS_FAILURE
     A query for www.example.org should have rcode NOERROR




More information about the bind10-changes mailing list