BIND 10 trac1341, updated. 8f5429e41cb99c615abc1de0ee549702ca638217 Merge branch 'master' into trac1341

BIND 10 source code commits bind10-changes at lists.isc.org
Wed Nov 23 22:50:40 UTC 2011


The branch, trac1341 has been updated
       via  8f5429e41cb99c615abc1de0ee549702ca638217 (commit)
       via  24c2111ed800e95bc62901cd3b2970692a205578 (commit)
       via  f9224368908dd7ba16875b0d36329cf1161193f0 (commit)
       via  b8f67d200e64a2a9931b6d664781caf835f2ecd4 (commit)
       via  46bd9a8e6e3a543f97af6213bc7e43d619064aa7 (commit)
       via  3e19362bc1ba7dc67a87768e2b172c48b32417f5 (commit)
       via  758ab4461e8792e563ce1e0ad069d53b5e15d8dd (commit)
       via  b449ad20a4f58eb96aec8cd7dd7bb857bdb5d14b (commit)
       via  c41c32c11999a34a46d2e20155358438d769f767 (commit)
       via  181926059b0162e09c30b4b967b09294d713918e (commit)
       via  da3e9e54f1374d581d78f1d874ddafd427a622ab (commit)
       via  b34bf286c064d44746ec0b79e38a6177d01e6956 (commit)
       via  120946aa30b22c36995135b7d5bfcade4c26e192 (commit)
       via  78770f52c7f1e7268d99e8bfa8c61e889813bb33 (commit)
       via  498677a8877e4894fad598f9ec99974c414ef58c (commit)
       via  713160c9bed3d991a00b2ea5e7e3e7714d79625d (commit)
       via  b79e0ef1ad1ac5c64c8a131ea8e125ca6df066eb (commit)
       via  3d3592d4b1e7d3b0b3164067e57c1343db691851 (commit)
       via  84290dae3201ee83c8e4aad6f7e2b181d708811e (commit)
       via  9b6f54409617896742151c6aab9f5f318b7f53c5 (commit)
       via  36a5cd751a12ccbd31284ea19d0b10e8a5836b70 (commit)
       via  f1cb067ea86ab38810007ec6743e7c1f91042e99 (commit)
       via  6ddab5f4ea56162d0834e22a68605a1a427cc8c2 (commit)
       via  e4b99333e4c9946741148b6c95ed070653bec0fe (commit)
       via  b0cb2b651ec620418e891db0d21791beadb81906 (commit)
       via  e9e0f96594eec741393fa197c1d91362c96109e1 (commit)
       via  96e0aa96b5a2fd31833e9afe64bb8e4cc34e23c5 (commit)
       via  d9be597335af84bc93c9559bbd76fa85ef0f49c4 (commit)
       via  8a5b3e3b460e7f741b1560f73423c8d688db9d85 (commit)
       via  275d091229e914a848408b785f0143541abed6d5 (commit)
       via  b5553ef764f6c8cb0acea25e14b6e7a6a3a4cd47 (commit)
       via  bdde86c05963e9d491015e906c1b899609417887 (commit)
       via  eb53cae4b35f858436cc20bf28ad06cbdb2211ab (commit)
       via  11a4fe2def28da2ae83c94647a11fbb2114ec467 (commit)
       via  c2213ce7281be2aed47023a6f052bbec868a6028 (commit)
       via  045c30f0dffebb30ad8862986be435748ed0efb6 (commit)
       via  a6fd03e989a1fd5ae9514774bb3b3bb2a6668765 (commit)
       via  8c07f46adfdd748ee33b3b5e9d33a78a64dded10 (commit)
       via  235ff5af7733a7d464b172c4424f8facf284fed6 (commit)
       via  8f3f3e9da36c5a0cbbfa4e2a5ddc598be7fece4a (commit)
       via  fe04c9377836fcd387f79447906e7ec83911b5b2 (commit)
       via  43de15e4b0bd0094910ecc4f4365744cb6c1eeab (commit)
       via  5e2238494ec665b558a6bf3b6a2c7351c1e022ba (commit)
       via  755cd222be64019ea3b8db62e6d2643e6b6374c7 (commit)
       via  5720f2262f0a1e4b8b2dcb1b66b94431e0dc6ff2 (commit)
       via  8780f998204e96767785b29cd5b0e58cbeb10e1f (commit)
       via  0d94cca23a4f22d1bb953d62d38358a8b0e49f01 (commit)
       via  4215dabae27f7b9b089ff8fafef2ba5425062fc5 (commit)
       via  219879a5c8d6cb361d6d6f91d88c199930560994 (commit)
       via  7003eecf6f7792d140e74bac444fb00eb7b8415b (commit)
       via  7c6c725225eb89d9911b28aff0c6d80152e26aaf (commit)
       via  132e0b02edf9a0cebccd64a183eb56839f42606f (commit)
       via  2aac7b891f4ee43fa29bbd41ee3bd48c4a849010 (commit)
       via  bccc91bbd2496b87b408ebff3cd9c6880f952b1c (commit)
       via  88147da513fdb22eb4e430390746f36c96304c7e (commit)
       via  567f822d4758d13b84161d67118ac1bce08b4c47 (commit)
       via  f94f5bc089b09a77b34138bbf19ea71921a7950d (commit)
       via  e3406364189d62ba54d85c3d23b40cefd02af584 (commit)
       via  6da32eaece41f360a87388c44528dca979c10ab0 (commit)
       via  3dcdc74a5e0f8cb7fd0c6a3f6dee480e30199f03 (commit)
       via  7fb9faf4602b6b4feff4c940942c12be838a8153 (commit)
       via  d60907a85ba3f762b81189588d1b7317b95e0521 (commit)
       via  b88b05b2a779554a0e3c345933104d42046fffaa (commit)
       via  489a53541118413b38865c8a3cf84b24b8b7dfe2 (commit)
       via  63f04832f2604868133a23d110ce6df5a9707993 (commit)
       via  de07e6a0ab66de4d3c7720dc93bc7d9198c9d26b (commit)
       via  4ca71b858671d112fade23b449f2a59f14d1d300 (commit)
       via  2139076757c1a14ecce96eafd1388f978732f8aa (commit)
       via  ab47b771999bd12171e65a8a3fb2ee512b709c4b (commit)
       via  ebe4e57805eda25ca347e0a9db8adad11fb3d4b5 (commit)
       via  0f76bcddad8050baf811b0eaa5a117cc61dcbba1 (commit)
       via  f01fb1d89b20b23c0a680b1a97dc83e5a174e2e6 (commit)
       via  d2e805bb39d06f0ed47c49879909f35b5d341530 (commit)
       via  92794c72752a77005c2f9c7683fd2c65d7d802e9 (commit)
       via  046729c74341bb2ed1e6f60f81470cf6a6883000 (commit)
       via  36db2f897ac139ca9b71ccee07a7b1ba1e3aee7b (commit)
       via  3000256b60ee6a2c19a7188be4d17eca833ce869 (commit)
       via  edf044e9e2f1572b618ec2438cea1cad46432276 (commit)
       via  573abf93bec24753aebb5a6c70d8f50def521879 (commit)
       via  d287d9c92ecfb59d2c9f525cf79c7bb5167984f6 (commit)
       via  50e96053742a30584f91a6bdb4b788977cd166bf (commit)
       via  f1e08d75cabc45454a9bde86158dc8c7348d7f9d (commit)
       via  cc48074a9fec60ef9ba69991549f9e167e620225 (commit)
       via  7a5903389ed505f6c7ca4c87adf705216d11d1af (commit)
       via  8e8607c6faa34d9493a831054ecb64281f1f06c7 (commit)
       via  d99d546be040419fd49ad3be179eb2206f5023de (commit)
       via  4ab7d17edc10ce4f7b834709aa009aba4db9d877 (commit)
       via  df02b63fe1176c572a7eee996921f211ca970953 (commit)
       via  f8a64959bc5f3ddf68ba4d01bee092bf4f1f9558 (commit)
       via  7e96227163334ecd54e506bd2cedb58d3f6cf91d (commit)
       via  ca42fb6438b70ef569d00dc07b1bb23c0f6124f2 (commit)
       via  bcb37a2f6b11128620bb34a0c2d3dbf7334c0ab7 (commit)
       via  d17ae6dc7160a471abdd05f22aacc359df54b4e4 (commit)
       via  d9319841c509648c1ac18fec2c3d2b2c08313eb9 (commit)
       via  6d5f34008d7e793546fd990cad11e40268c0ff04 (commit)
       via  89b3af8226cb89bcc59ceff5e9547dbfc5b30665 (commit)
       via  d0a7cf4a98daf0ec8759640a91a12059cece6c6d (commit)
       via  5dc6be6febd523e202771cd11624efc29854349c (commit)
       via  f230c7d18b68d5c03131089a4f5c9739af7f9d83 (commit)
       via  e1682a42d23d36a3647878e13681dcd659622818 (commit)
       via  e45fa27d90ab3ea7b1081ca7d9513f63f5083b8d (commit)
       via  1e9bc2c16ef78f35ec35e340c696b4bdc10b47b2 (commit)
       via  85a2ce538c6f939ca539347676e5587228a29895 (commit)
       via  d1773b2ef6f98c26493ae76783158fc2ae6fbe52 (commit)
       via  2f51afcbc57c6d58e7d90f37962f3b93bc768e1b (commit)
       via  0b9c1b299f2078ab1a7bf08759a463eb179f0365 (commit)
       via  918c35143eb61d6e0ac96e98f2a95b12d55fdc0c (commit)
       via  480da1fe075da66aa8a144d37c23bac2fcfa1e2c (commit)
       via  81b1ba0e9cf67bc5e8ee6040b28436d4c64b72cc (commit)
       via  fc17063223655ab14b4db33bd63dd33fdc5ed5ac (commit)
       via  61feac8366f972b60410b925e36a9267338b3e9a (commit)
       via  2085b2255a79c0e5a04fe457bbb228d2fa24953b (commit)
       via  2d20ee347d82f840328c2bddd014cdf232962843 (commit)
       via  1ff0be2456cfaf9279970ae9a30a48d6267b96cf (commit)
       via  80447181a64656b97afa9ab71440907017e873f4 (commit)
       via  3878aa43817deaee33b21956d3066baef77a24ce (commit)
       via  01b4b95b5fb7aa99765f29ffc61f5131173148eb (commit)
       via  6b600cb1816705b04470ba2d0aca64dfdf8f55d2 (commit)
       via  c383ebc71434baa5cb314b3de3f3e18f39ebd0c7 (commit)
       via  d5ec22cc344998038cf68b6fdf309ad2a12b2b5e (commit)
       via  2024a1554624868e5f0a4d09b75c3ddf39dd342d (commit)
       via  10b6bc17b7c264f41dcdba64fc3a79904c06164a (commit)
       via  a48e7fa14f2ef90bce27ff3e7aa4a93165e08d37 (commit)
       via  62809f71c563128cb3cc467d867c621c61dbb926 (commit)
       via  08d090b4685220d3f286e1506e1a3c884146122f (commit)
       via  7b667cbd3bd3baeaceb60b987ab9770684ff5038 (commit)
       via  ab48bd8228405d50b149c502d7f73b5eb1a57608 (commit)
       via  ecf9f48f4b4c3beaf97ae0e83c11f4547f024734 (commit)
       via  4d39e13d7f5ae5c30277f602f669f0421e2bf05c (commit)
       via  3bf84d5c678f9f86df6382cf30e694404e2f77cb (commit)
       via  12a6217c59bf48ead2e11aaaedb774af7a618701 (commit)
       via  cb57c9fcaa897752dd7599dcc15d647fb880285f (commit)
       via  1294219279910a89d4a99e6292cea8e13a4c301e (commit)
       via  61dd61b8f259b0938646fa2539fe928608a0fbad (commit)
       via  f1306bd835659173f3fffcfbe1bf8971dc62efd9 (commit)
       via  7cc8a7d6c32472021b34e43d43a288cfa263f007 (commit)
      from  52fa244c12eb6d708b0260b5554396df5d00b079 (commit)

Those revisions listed above that are new to this repository have
not appeared on any other notification email; so we list those
revisions in full, below.

- Log -----------------------------------------------------------------
commit 8f5429e41cb99c615abc1de0ee549702ca638217
Merge: 52fa244c12eb6d708b0260b5554396df5d00b079 24c2111ed800e95bc62901cd3b2970692a205578
Author: Jeremy C. Reed <jreed at ISC.org>
Date:   Wed Nov 23 16:49:07 2011 -0600

    Merge branch 'master' into trac1341

-----------------------------------------------------------------------

Summary of changes:
 ChangeLog                                          |   30 +-
 src/bin/bind10/bind10.xml                          |   15 -
 src/bin/bind10/bind10_src.py.in                    |   60 +-
 src/bin/bind10/bob.spec                            |    2 +-
 src/bin/bind10/tests/bind10_test.py.in             |   45 +-
 src/bin/stats/stats-httpd-xml.tpl                  |   23 +-
 src/bin/stats/stats-httpd-xsd.tpl                  |   38 +-
 src/bin/stats/stats-httpd-xsl.tpl                  |   27 +-
 src/bin/stats/stats.py.in                          |    8 +-
 src/bin/stats/stats_httpd.py.in                    |  511 ++++++++++---
 src/bin/stats/stats_httpd_messages.mes             |    6 +
 src/bin/stats/tests/Makefile.am                    |    2 +-
 src/bin/stats/tests/b10-stats-httpd_test.py        |  823 +++++++++++++++++---
 src/bin/stats/tests/b10-stats_test.py              |  194 ++++-
 src/bin/stats/tests/test_utils.py                  |   59 ++-
 src/bin/xfrin/tests/Makefile.am                    |    2 +-
 src/bin/xfrout/tests/Makefile.am                   |    8 +
 src/bin/xfrout/tests/testdata/example.com          |    6 +
 .../tests/testdata/test.sqlite3}                   |  Bin 11264 -> 11264 bytes
 src/bin/xfrout/tests/xfrout_test.py.in             |  383 ++++++----
 src/bin/xfrout/xfrout.py.in                        |  289 +++++---
 src/bin/xfrout/xfrout_messages.mes                 |   42 +-
 src/lib/datasrc/client.h                           |   37 +-
 src/lib/datasrc/data_source.h                      |   12 +
 src/lib/datasrc/database.cc                        |  190 ++++--
 src/lib/datasrc/database.h                         |   66 ++-
 src/lib/datasrc/memory_datasrc.cc                  |   57 +-
 src/lib/datasrc/memory_datasrc.h                   |    5 +-
 src/lib/datasrc/sqlite3_accessor.cc                |  326 +++++++-
 src/lib/datasrc/sqlite3_accessor.h                 |   61 ++-
 src/lib/datasrc/tests/Makefile.am                  |    1 +
 src/lib/datasrc/tests/client_unittest.cc           |    4 +-
 src/lib/datasrc/tests/database_unittest.cc         |  277 +++++++-
 src/lib/datasrc/tests/memory_datasrc_unittest.cc   |   48 ++
 src/lib/datasrc/tests/sqlite3_accessor_unittest.cc |  203 ++++--
 src/lib/datasrc/tests/testdata/brokendb.sqlite3    |  Bin 2048 -> 4096 bytes
 .../lib/datasrc/tests/testdata/diffs.sqlite3       |  Bin 14336 -> 16384 bytes
 src/lib/datasrc/tests/testdata/diffs_table.sql     |  123 +++
 src/lib/datasrc/tests/testdata/example.org.sqlite3 |  Bin 14336 -> 14336 bytes
 .../datasrc/tests/testdata/example2.com.sqlite3    |  Bin 11264 -> 14336 bytes
 src/lib/datasrc/tests/testdata/rwtest.sqlite3      |  Bin 11264 -> 13312 bytes
 src/lib/datasrc/tests/testdata/test-root.sqlite3   |  Bin 14336 -> 17408 bytes
 src/lib/datasrc/zone.h                             |   14 +
 src/lib/dns/rdata/generic/soa_6.cc                 |    6 +
 src/lib/dns/rdata/generic/soa_6.h                  |    2 +
 src/lib/dns/tests/rdata_soa_unittest.cc            |    5 +
 src/lib/python/isc/bind10/component.py             |    2 +-
 src/lib/python/isc/bind10/special_component.py     |    6 +
 src/lib/python/isc/datasrc/client_inc.cc           |   32 +-
 src/lib/python/isc/datasrc/client_python.cc        |   42 +-
 src/lib/python/isc/datasrc/tests/datasrc_test.py   |  161 ++++-
 .../isc/datasrc/tests/testdata/example.com.sqlite3 |  Bin 43008 -> 44032 bytes
 src/lib/python/isc/notify/notify_out.py            |  155 +++--
 src/lib/python/isc/notify/notify_out_messages.mes  |   21 +
 src/lib/python/isc/notify/tests/Makefile.am        |    9 +
 src/lib/python/isc/notify/tests/notify_out_test.py |   73 +--
 .../isc/notify/tests/testdata/brokentest.sqlite3}  |  Bin 11264 -> 11264 bytes
 .../python/isc/notify/tests/testdata/example.com   |   10 +
 .../python/isc/notify/tests/testdata/example.net   |   14 +
 .../isc/notify/tests/testdata/multisoa.example     |    5 +
 .../python/isc/notify/tests/testdata/nons.example  |    3 +
 .../python/isc/notify/tests/testdata/nosoa.example |    7 +
 .../python/isc/notify/tests/testdata/test.sqlite3} |  Bin 11264 -> 13312 bytes
 63 files changed, 3571 insertions(+), 979 deletions(-)
 create mode 100644 src/bin/xfrout/tests/testdata/example.com
 copy src/bin/{xfrin/tests/testdata/example.com.sqlite3 => xfrout/tests/testdata/test.sqlite3} (89%)
 copy tests/lettuce/data/example.org.sqlite3 => src/lib/datasrc/tests/testdata/diffs.sqlite3 (68%)
 create mode 100644 src/lib/datasrc/tests/testdata/diffs_table.sql
 copy src/{bin/xfrin/tests/testdata/example.com.sqlite3 => lib/python/isc/notify/tests/testdata/brokentest.sqlite3} (77%)
 create mode 100644 src/lib/python/isc/notify/tests/testdata/example.com
 create mode 100644 src/lib/python/isc/notify/tests/testdata/example.net
 create mode 100644 src/lib/python/isc/notify/tests/testdata/multisoa.example
 create mode 100644 src/lib/python/isc/notify/tests/testdata/nons.example
 create mode 100644 src/lib/python/isc/notify/tests/testdata/nosoa.example
 copy src/{bin/xfrin/tests/testdata/example.com.sqlite3 => lib/python/isc/notify/tests/testdata/test.sqlite3} (69%)

-----------------------------------------------------------------------
diff --git a/ChangeLog b/ChangeLog
index 4a72bbc..c04e845 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,31 @@
+320.	[func]*		vorner
+	The --brittle switch was removed from the bind10 executable. It didn't
+	work after the #213 change and the same effect can be accomplished by
+	declaring all components as core.
+	(Trac #1340, git f9224368908dd7ba16875b0d36329cf1161193f0)
+
+319.	[func]		naokikambe
+	b10-stats-httpd was updated. In addition of the access to all
+	statistics items of all modules, the specified item or the items of the
+	specified module name can be accessed. For example, the URI requested
+	by using the feature is showed as "/bind10/statistics/xml/Auth" or
+	"/bind10/statistics/xml/Auth/queries.tcp". The list of all possible
+	module names and all possible item names can be showed in the root
+	document, whose URI is "/bind10/statistics/xml". This change is not
+	only for the XML documents but also is for the XSD and XSL documents.
+	(Trac #917, git b34bf286c064d44746ec0b79e38a6177d01e6956)
+
+318.    [func]      stephen
+	Add C++ API for accessing zone difference information in database-based
+	data sources.
+	(Trac #1330, git 78770f52c7f1e7268d99e8bfa8c61e889813bb33)
+
+317.    [func]      vorner
+	datasrc: the getUpdater method of DataSourceClient supports an optional
+	'journaling' parameter to indicate the generated updater to store diffs.
+	The database based derived class implements this extension.
+	(Trac #1331, git 713160c9bed3d991a00b2ea5e7e3e7714d79625d)
+
 316.	[func]*		vorner
 	The configuration of what parts of the system run is more flexible now.
 	Everything that should run must have an entry in Boss/components.
@@ -484,7 +512,7 @@ bind10-devel-20110705 released on July 05, 2011
 	(Trac #542, git 1aa773d84cd6431aa1483eb34a7f4204949a610f)
 
 243.	[func]*		feng
-	Add optional hmac algorithm SHA224/384/812.
+	Add optional hmac algorithm SHA224/384/512.
 	(Trac #782, git 77d792c9d7c1a3f95d3e6a8b721ac79002cd7db1)
 
 bind10-devel-20110519 released on May 19, 2011
diff --git a/src/bin/bind10/bind10.xml b/src/bin/bind10/bind10.xml
index ccddff3..6705760 100644
--- a/src/bin/bind10/bind10.xml
+++ b/src/bin/bind10/bind10.xml
@@ -51,7 +51,6 @@
       <arg><option>-u <replaceable>user</replaceable></option></arg>
       <arg><option>-v</option></arg>
       <arg><option>-w <replaceable>wait_time</replaceable></option></arg>
-      <arg><option>--brittle</option></arg>
       <arg><option>--cmdctl-port</option> <replaceable>port</replaceable></arg>
       <arg><option>--config-file</option> <replaceable>config-filename</replaceable></arg>
       <arg><option>--data-path</option> <replaceable>directory</replaceable></arg>
@@ -92,20 +91,6 @@
 
       <varlistentry>
         <term>
-          <option>--brittle</option>
-        </term>
-        <listitem>
-          <para>
-	    Shutdown if any of the child processes of
-	    <command>bind10</command> exit.  This is intended to
-	    help developers debug the server, and should not be
-	    used in production.
-          </para>
-        </listitem>
-      </varlistentry>
-
-      <varlistentry>
-        <term>
           <option>-c</option> <replaceable>config-filename</replaceable>,
           <option>--config-file</option> <replaceable>config-filename</replaceable>
         </term>
diff --git a/src/bin/bind10/bind10_src.py.in b/src/bin/bind10/bind10_src.py.in
index e3e045b..4d8633e 100755
--- a/src/bin/bind10/bind10_src.py.in
+++ b/src/bin/bind10/bind10_src.py.in
@@ -219,7 +219,7 @@ class BoB:
     
     def __init__(self, msgq_socket_file=None, data_path=None,
     config_filename=None, nocache=False, verbose=False, setuid=None,
-    username=None, cmdctl_port=None, brittle=False, wait_time=10):
+    username=None, cmdctl_port=None, wait_time=10):
         """
             Initialize the Boss of BIND. This is a singleton (only one can run).
         
@@ -233,9 +233,6 @@ class BoB:
             The cmdctl_port is passed to cmdctl and specify on which port it
             should listen.
 
-            brittle is a debug option that controls whether the Boss shuts down
-            after any process dies.
-
             wait_time controls the amount of time (in seconds) that Boss waits
             for selected processes to initialize before continuing with the
             initialization.  Currently this is only the configuration manager.
@@ -264,7 +261,6 @@ class BoB:
         self.data_path = data_path
         self.config_filename = config_filename
         self.cmdctl_port = cmdctl_port
-        self.brittle = brittle
         self.wait_time = wait_time
         self._component_configurator = isc.bind10.component.Configurator(self,
             isc.bind10.special_component.get_specials())
@@ -628,21 +624,10 @@ class BoB:
         # ... and start
         return self.start_process("b10-resolver", resargs, self.c_channel_env)
 
-    def start_cmdctl(self):
-        """
-            Starts the command control process
-        """
-        args = ["b10-cmdctl"]
-        if self.cmdctl_port is not None:
-            args.append("--port=" + str(self.cmdctl_port))
-        if self.verbose:
-            args.append("-v")
-        return self.start_process("b10-cmdctl", args, self.c_channel_env,
-                                  self.cmdctl_port)
-
-    def start_xfrin(self):
-        # XXX: a quick-hack workaround.  xfrin will implicitly use dynamically
-        # loadable data source modules, which will be installed in $(libdir).
+    def __ld_path_hack(self):
+        # XXX: a quick-hack workaround.  xfrin/out will implicitly use
+        # dynamically loadable data source modules, which will be installed in
+        # $(libdir).
         # On some OSes (including MacOS X and *BSDs) the main process (python)
         # cannot find the modules unless they are located in a common shared
         # object path or a path in the (DY)LD_LIBRARY_PATH.  We should seek
@@ -655,21 +640,44 @@ class BoB:
         # the same as for the libexec path addition
         # TODO: Once #1292 is finished, remove this method and the special
         # component, use it as normal component.
-        c_channel_env = dict(self.c_channel_env)
+        env = dict(self.c_channel_env)
         if ADD_LIBEXEC_PATH:
             cur_path = os.getenv('DYLD_LIBRARY_PATH')
             cur_path = '' if cur_path is None else ':' + cur_path
-            c_channel_env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+            env['DYLD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
 
             cur_path = os.getenv('LD_LIBRARY_PATH')
             cur_path = '' if cur_path is None else ':' + cur_path
-            c_channel_env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+            env['LD_LIBRARY_PATH'] = "@@LIBDIR@@" + cur_path
+        return env
+
+    def start_cmdctl(self):
+        """
+            Starts the command control process
+        """
+        args = ["b10-cmdctl"]
+        if self.cmdctl_port is not None:
+            args.append("--port=" + str(self.cmdctl_port))
+        if self.verbose:
+            args.append("-v")
+        return self.start_process("b10-cmdctl", args, self.c_channel_env,
+                                  self.cmdctl_port)
+
+    def start_xfrin(self):
         # Set up the command arguments.
         args = ['b10-xfrin']
         if self.verbose:
             args += ['-v']
 
-        return self.start_process("b10-xfrin", args, c_channel_env)
+        return self.start_process("b10-xfrin", args, self.__ld_path_hack())
+
+    def start_xfrout(self):
+        # Set up the command arguments.
+        args = ['b10-xfrout']
+        if self.verbose:
+            args += ['-v']
+
+        return self.start_process("b10-xfrout", args, self.__ld_path_hack())
 
     def start_all_components(self):
         """
@@ -928,8 +936,6 @@ def parse_args(args=sys.argv[1:], Parser=OptionParser):
     parser.add_option("--pid-file", dest="pid_file", type="string",
                       default=None,
                       help="file to dump the PID of the BIND 10 process")
-    parser.add_option("--brittle", dest="brittle", action="store_true",
-                      help="debugging flag: exit if any component dies")
     parser.add_option("-w", "--wait", dest="wait_time", type="int",
                       default=10, help="Time (in seconds) to wait for config manager to start up")
 
@@ -1034,7 +1040,7 @@ def main():
     # Go bob!
     boss_of_bind = BoB(options.msgq_socket_file, options.data_path,
                        options.config_file, options.nocache, options.verbose,
-                       setuid, username, options.cmdctl_port, options.brittle,
+                       setuid, username, options.cmdctl_port,
                        options.wait_time)
     startup_result = boss_of_bind.startup()
     if startup_result:
diff --git a/src/bin/bind10/bob.spec b/src/bin/bind10/bob.spec
index 4a3cc85..4267b70 100644
--- a/src/bin/bind10/bob.spec
+++ b/src/bin/bind10/bob.spec
@@ -15,7 +15,7 @@
             "kind": "dispensable"
           },
           "b10-xfrin": { "special": "xfrin", "kind": "dispensable" },
-          "b10-xfrout": { "address": "Xfrout", "kind": "dispensable" },
+          "b10-xfrout": { "special": "xfrout", "kind": "dispensable" },
           "b10-zonemgr": { "address": "Zonemgr", "kind": "dispensable" },
           "b10-stats": { "address": "Stats", "kind": "dispensable" },
           "b10-stats-httpd": {
diff --git a/src/bin/bind10/tests/bind10_test.py.in b/src/bin/bind10/tests/bind10_test.py.in
index 7924cc0..bad5b04 100644
--- a/src/bin/bind10/tests/bind10_test.py.in
+++ b/src/bin/bind10/tests/bind10_test.py.in
@@ -274,8 +274,7 @@ class MockBob(BoB):
         return procinfo
 
     def start_simple(self, name):
-        procmap = { 'b10-xfrout': self.start_xfrout,
-                    'b10-zonemgr': self.start_zonemgr,
+        procmap = { 'b10-zonemgr': self.start_zonemgr,
                     'b10-stats': self.start_stats,
                     'b10-stats-httpd': self.start_stats_httpd,
                     'b10-cmdctl': self.start_cmdctl,
@@ -475,7 +474,7 @@ class TestStartStopProcessesBob(unittest.TestCase):
         if start_auth:
             config['b10-auth'] = { 'kind': 'needed', 'special': 'auth' }
             config['b10-xfrout'] = { 'kind': 'dispensable',
-                                     'address': 'Xfrout' }
+                                     'special': 'xfrout' }
             config['b10-xfrin'] = { 'kind': 'dispensable', 'special': 'xfrin' }
             config['b10-zonemgr'] = { 'kind': 'dispensable',
                                       'address': 'Zonemgr' }
@@ -741,15 +740,6 @@ class TestParseArgs(unittest.TestCase):
         options = parse_args(['--cmdctl-port=1234'], TestOptParser)
         self.assertEqual(1234, options.cmdctl_port)
 
-    def test_brittle(self):
-        """
-        Test we can use the "brittle" flag.
-        """
-        options = parse_args([], TestOptParser)
-        self.assertFalse(options.brittle)
-        options = parse_args(['--brittle'], TestOptParser)
-        self.assertTrue(options.brittle)
-
 class TestPIDFile(unittest.TestCase):
     def setUp(self):
         self.pid_file = '@builddir@' + os.sep + 'bind10.pid'
@@ -797,37 +787,6 @@ class TestPIDFile(unittest.TestCase):
         self.assertRaises(IOError, dump_pid,
                           'nonexistent_dir' + os.sep + 'bind10.pid')
 
-# TODO: Do we want brittle mode? Probably yes. So we need to re-enable to after that.
-@unittest.skip("Brittle mode temporarily broken")
-class TestBrittle(unittest.TestCase):
-    def test_brittle_disabled(self):
-        bob = MockBob()
-        bob.start_all_components()
-        bob.runnable = True
-
-        bob.reap_children()
-        self.assertTrue(bob.runnable)
-
-    def simulated_exit(self):
-        ret_val = self.exit_info
-        self.exit_info = (0, 0)
-        return ret_val
-
-    def test_brittle_enabled(self):
-        bob = MockBob()
-        bob.start_all_components()
-        bob.runnable = True
-
-        bob.brittle = True
-        self.exit_info = (5, 0)
-        bob._get_process_exit_status = self.simulated_exit
-
-        old_stdout = sys.stdout
-        sys.stdout = open("/dev/null", "w")
-        bob.reap_children()
-        sys.stdout = old_stdout
-        self.assertFalse(bob.runnable)
-
 class TestBossComponents(unittest.TestCase):
     """
     Test the boss propagates component configuration properly to the
diff --git a/src/bin/stats/stats-httpd-xml.tpl b/src/bin/stats/stats-httpd-xml.tpl
index d5846ad..ed91423 100644
--- a/src/bin/stats/stats-httpd-xml.tpl
+++ b/src/bin/stats/stats-httpd-xml.tpl
@@ -1,24 +1,3 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <?xml-stylesheet type="text/xsl" href="$xsl_url_path"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<stats:stats_data version="1.0"
-  xmlns:stats="$xsd_namespace"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="$xsd_namespace $xsd_url_path">
-  $xml_string
-</stats:stats_data>
+$xml_string
\ No newline at end of file
diff --git a/src/bin/stats/stats-httpd-xsd.tpl b/src/bin/stats/stats-httpd-xsd.tpl
index 6ad1280..cc5578a 100644
--- a/src/bin/stats/stats-httpd-xsd.tpl
+++ b/src/bin/stats/stats-httpd-xsd.tpl
@@ -1,38 +1,2 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
-<schema targetNamespace="$xsd_namespace"
-  xmlns="http://www.w3.org/2001/XMLSchema"
-  xmlns:stats="$xsd_namespace">
-  <annotation>
-    <documentation xml:lang="en">XML schema of the statistics
-      data in BIND 10</documentation>
-  </annotation>
-  <element name="stats_data">
-    <annotation>
-      <documentation>A set of statistics data</documentation>
-    </annotation>
-    <complexType>
-      $xsd_string
-      <attribute name="version" type="token" use="optional" default="1.0">
-        <annotation>
-          <documentation>Version number of syntax</documentation>
-        </annotation>
-      </attribute>
-    </complexType>
-  </element>
-</schema>
+$xsd_string
diff --git a/src/bin/stats/stats-httpd-xsl.tpl b/src/bin/stats/stats-httpd-xsl.tpl
index a1f6406..7c2e7ae 100644
--- a/src/bin/stats/stats-httpd-xsl.tpl
+++ b/src/bin/stats/stats-httpd-xsl.tpl
@@ -1,23 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
-<!--
- - Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
--->
-
 <xsl:stylesheet version="1.0"
   xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns="http://www.w3.org/1999/xhtml"
-  xmlns:stats="$xsd_namespace">
+  xmlns:bind10="$xsd_namespace">
   <xsl:output method="html" encoding="UTF-8"
     doctype-public="-//W3C//DTD HTML 4.01 Transitional//EN"
     doctype-system=" http://www.w3.org/TR/html4/loose.dtd " />
@@ -42,14 +26,7 @@ td.title {
       </head>
       <body>
         <h1>BIND 10 Statistics</h1>
-        <table>
-          <tr>
-            <th>Owner</th>
-            <th>Title</th>
-            <th>Value</th>
-          </tr>
-          <xsl:apply-templates />
-        </table>
+        <xsl:apply-templates />
       </body>
     </html>
   </xsl:template>
diff --git a/src/bin/stats/stats.py.in b/src/bin/stats/stats.py.in
index 3a7f47a..51c4e09 100755
--- a/src/bin/stats/stats.py.in
+++ b/src/bin/stats/stats.py.in
@@ -246,12 +246,12 @@ class Stats:
         self.update_statistics_data()
         if owner and name:
             try:
-                return self.statistics_data[owner][name]
+                return {owner:{name:self.statistics_data[owner][name]}}
             except KeyError:
                 pass
         elif owner:
             try:
-                return self.statistics_data[owner]
+                return {owner: self.statistics_data[owner]}
             except KeyError:
                 pass
         elif name:
@@ -360,9 +360,9 @@ class Stats:
         if owner:
             try:
                 if name:
-                    return isc.config.create_answer(0, schema_byname[owner][name])
+                    return isc.config.create_answer(0, {owner:[schema_byname[owner][name]]})
                 else:
-                    return isc.config.create_answer(0, schema[owner])
+                    return isc.config.create_answer(0, {owner:schema[owner]})
             except KeyError:
                 pass
         else:
diff --git a/src/bin/stats/stats_httpd.py.in b/src/bin/stats/stats_httpd.py.in
index 042630d..f265abb 100644
--- a/src/bin/stats/stats_httpd.py.in
+++ b/src/bin/stats/stats_httpd.py.in
@@ -29,6 +29,7 @@ import http.server
 import socket
 import string
 import xml.etree.ElementTree
+import urllib.parse
 
 import isc.cc
 import isc.config
@@ -66,7 +67,7 @@ XML_URL_PATH = '/bind10/statistics/xml'
 XSD_URL_PATH = '/bind10/statistics/xsd'
 XSL_URL_PATH = '/bind10/statistics/xsl'
 # TODO: This should be considered later.
-XSD_NAMESPACE = 'http://bind10.isc.org' + XSD_URL_PATH
+XSD_NAMESPACE = 'http://bind10.isc.org/bind10'
 
 # Assign this process name
 isc.util.process.rename()
@@ -85,14 +86,29 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
 
     def send_head(self):
         try:
-            if self.path == XML_URL_PATH:
-                body = self.server.xml_handler()
-            elif self.path == XSD_URL_PATH:
-                body = self.server.xsd_handler()
-            elif self.path == XSL_URL_PATH:
-                body = self.server.xsl_handler()
+            req_path = self.path
+            req_path = urllib.parse.urlsplit(req_path).path
+            req_path = urllib.parse.unquote(req_path)
+            req_path = os.path.normpath(req_path)
+            path_dirs = req_path.split('/')
+            path_dirs = [ d for d in filter(None, path_dirs) ]
+            req_path = '/'+"/".join(path_dirs)
+            module_name = None
+            item_name = None
+            # in case of /bind10/statistics/xxx/YYY/zzz
+            if len(path_dirs) >= 5:
+                item_name = path_dirs[4]
+            # in case of /bind10/statistics/xxx/YYY ...
+            if len(path_dirs) >= 4:
+                module_name = path_dirs[3]
+            if req_path == '/'.join([XML_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xml_handler(module_name, item_name)
+            elif req_path == '/'.join([XSD_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xsd_handler(module_name, item_name)
+            elif req_path == '/'.join([XSL_URL_PATH] + path_dirs[3:5]):
+                body = self.server.xsl_handler(module_name, item_name)
             else:
-                if self.path == '/' and 'Host' in self.headers.keys():
+                if req_path == '/' and 'Host' in self.headers.keys():
                     # redirect to XML URL only when requested with '/'
                     self.send_response(302)
                     self.send_header(
@@ -104,6 +120,12 @@ class HttpHandler(http.server.BaseHTTPRequestHandler):
                     # Couldn't find HOST
                     self.send_error(404)
                     return None
+        except StatsHttpdDataError as err:
+            # Couldn't find neither specified module name nor
+            # specified item name
+            self.send_error(404)
+            logger.error(STATHTTPD_SERVER_DATAERROR, err)
+            return None
         except StatsHttpdError as err:
             self.send_error(500)
             logger.error(STATHTTPD_SERVER_ERROR, err)
@@ -145,6 +167,12 @@ class StatsHttpdError(Exception):
     main routine."""
     pass
 
+class StatsHttpdDataError(Exception):
+    """Exception class for StatsHttpd class. The reason seems to be
+    due to the data. It is intended to be thrown from the the
+    StatsHttpd object to the HttpHandler object or main routine."""
+    pass
+
 class StatsHttpd:
     """The main class of HTTP server of HTTP/XML interface for
     statistics module. It handles HTTP requests, and command channel
@@ -334,12 +362,27 @@ class StatsHttpd:
             return isc.config.ccsession.create_answer(
                 1, "Unknown command: " + str(command))
 
-    def get_stats_data(self):
+    def get_stats_data(self, owner=None, name=None):
         """Requests statistics data to the Stats daemon and returns
-        the data which obtains from it"""
+        the data which obtains from it. The first argument is the
+        module name which owns the statistics data, the second
+        argument is one name of the statistics items which the the
+        module owns. The second argument cannot be specified when the
+        first argument is not specified. It returns the statistics
+        data of the specified module or item. When the session timeout
+        or the session error is occurred, it raises
+        StatsHttpdError. When the stats daemon returns non-zero
+        value, it raises StatsHttpdDataError."""
+        param = {}
+        if owner is None and name is None:
+            param = None
+        if owner is not None:
+            param['owner'] = owner
+        if name is not None:
+            param['name'] = name
         try:
             seq = self.cc_session.group_sendmsg(
-                isc.config.ccsession.create_command('show'), 'Stats')
+                isc.config.ccsession.create_command('show', param), 'Stats')
             (answer, env) = self.cc_session.group_recvmsg(False, seq)
             if answer:
                 (rcode, value) = isc.config.ccsession.parse_answer(answer)
@@ -351,131 +394,409 @@ class StatsHttpd:
             if rcode == 0:
                 return value
             else:
-                raise StatsHttpdError("Stats module: %s" % str(value))
+                raise StatsHttpdDataError("Stats module: %s" % str(value))
 
-    def get_stats_spec(self):
+    def get_stats_spec(self, owner=None, name=None):
         """Requests statistics data to the Stats daemon and returns
-        the data which obtains from it"""
+        the data which obtains from it. The first argument is the
+        module name which owns the statistics data, the second
+        argument is one name of the statistics items which the
+        module owns. The second argument cannot be specified when the
+        first argument is not specified. It returns the statistics
+        specification of the specified module or item. When the
+        session timeout or the session error is occurred, it raises
+        StatsHttpdError. When the stats daemon returns non-zero
+        value, it raises StatsHttpdDataError."""
+        param = {}
+        if owner is None and name is None:
+            param = None
+        if owner is not None:
+            param['owner'] = owner
+        if name is not None:
+            param['name'] = name
         try:
             seq = self.cc_session.group_sendmsg(
-                isc.config.ccsession.create_command('showschema'), 'Stats')
+                isc.config.ccsession.create_command('showschema', param), 'Stats')
             (answer, env) = self.cc_session.group_recvmsg(False, seq)
             if answer:
                 (rcode, value) = isc.config.ccsession.parse_answer(answer)
                 if rcode == 0:
                     return value
                 else:
-                    raise StatsHttpdError("Stats module: %s" % str(value))
+                    raise StatsHttpdDataError("Stats module: %s" % str(value))
         except (isc.cc.session.SessionTimeout,
                 isc.cc.session.SessionError) as err:
             raise StatsHttpdError("%s: %s" %
                                   (err.__class__.__name__, err))
 
-    def xml_handler(self):
-        """Handler which requests to Stats daemon to obtain statistics
-        data and returns the body of XML document"""
-        xml_list=[]
-        for (mod, spec) in self.get_stats_data().items():
-            if not spec: continue
-            elem1 = xml.etree.ElementTree.Element(str(mod))
-            for (k, v) in spec.items():
-                elem2 = xml.etree.ElementTree.Element(str(k))
-                elem2.text = str(v)
-                elem1.append(elem2)
-            # The coding conversion is tricky. xml..tostring() of Python 3.2
-            # returns bytes (not string) regardless of the coding, while
-            # tostring() of Python 3.1 returns a string.  To support both
-            # cases transparently, we first make sure tostring() returns
-            # bytes by specifying utf-8 and then convert the result to a
-            # plain string (code below assume it).
-            xml_list.append(
-                str(xml.etree.ElementTree.tostring(elem1, encoding='utf-8'),
-                    encoding='us-ascii'))
-        xml_string = "".join(xml_list)
+
+    def xml_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics data and specification by
+        using the functions get_stats_data and get_stats_spec
+        respectively and loads the XML template file and returns the
+        string of the XML document. The first argument is the module
+        name which owns the statistics data, the second argument is
+        one name of the statistics items which the module
+        owns. The second argument cannot be specified when the first
+        argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_data2xml(stats_spec, stats_data, xml_elem):
+            """Internal use for xml_handler. Reads stats_data and
+            stats_spec specified as first and second arguments, and
+            modify the xml object specified as third
+            argument. xml_elem must be modified and always returns
+            None."""
+            # assumed started with module_spec or started with
+            # item_spec in statistics
+            if type(stats_spec) is dict:
+                # assumed started with module_spec
+                if 'item_name' not in stats_spec \
+                        and 'item_type' not in stats_spec:
+                    for module_name in stats_spec.keys():
+                        elem = xml.etree.ElementTree.Element(module_name)
+                        stats_data2xml(stats_spec[module_name],
+                                       stats_data[module_name], elem)
+                        xml_elem.append(elem)
+                # started with item_spec in statistics
+                else:
+                    elem = xml.etree.ElementTree.Element(stats_spec['item_name'])
+                    if stats_spec['item_type'] == 'map':
+                        stats_data2xml(stats_spec['map_item_spec'],
+                                       stats_data,
+                                       elem)
+                    elif stats_spec['item_type'] == 'list':
+                        for item in stats_data:
+                            stats_data2xml(stats_spec['list_item_spec'],
+                                           item, elem)
+                    else:
+                        elem.text = str(stats_data)
+                    xml_elem.append(elem)
+            # assumed started with stats_spec
+            elif type(stats_spec) is list:
+                for item_spec in stats_spec:
+                    stats_data2xml(item_spec,
+                                   stats_data[item_spec['item_name']],
+                                   xml_elem)
+
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        stats_data = self.get_stats_data(module_name, item_name)
+        # make the path xxx/module/item if specified respectively
+        path_info = ''
+        if module_name is not None and item_name is not None:
+            path_info = '/' + module_name + '/' + item_name
+        elif module_name is not None:
+            path_info = '/' + module_name
+        xml_elem = xml.etree.ElementTree.Element(
+            'bind10:statistics',
+            attrib={ 'xsi:schemaLocation' : XSD_NAMESPACE + ' ' + XSD_URL_PATH + path_info,
+                     'xmlns:bind10' : XSD_NAMESPACE,
+                     'xmlns:xsi' : "http://www.w3.org/2001/XMLSchema-instance" })
+        stats_data2xml(stats_spec, stats_data, xml_elem)
+        # The coding conversion is tricky. xml..tostring() of Python 3.2
+        # returns bytes (not string) regardless of the coding, while
+        # tostring() of Python 3.1 returns a string.  To support both
+        # cases transparently, we first make sure tostring() returns
+        # bytes by specifying utf-8 and then convert the result to a
+        # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
+        xml_string = str(xml.etree.ElementTree.tostring(xml_elem, encoding='utf-8'),
+                         encoding='us-ascii')
         self.xml_body = self.open_template(XML_TEMPLATE_LOCATION).substitute(
             xml_string=xml_string,
-            xsd_namespace=XSD_NAMESPACE,
-            xsd_url_path=XSD_URL_PATH,
-            xsl_url_path=XSL_URL_PATH)
+            xsl_url_path=XSL_URL_PATH + path_info)
         assert self.xml_body is not None
         return self.xml_body
 
-    def xsd_handler(self):
-        """Handler which just returns the body of XSD document"""
+    def xsd_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics specification by using
+        the function get_stats_spec respectively and loads the XSD
+        template file and returns the string of the XSD document. The
+        first argument is the module name which owns the statistics
+        data, the second argument is one name of the statistics items
+        which the module owns. The second argument cannot be
+        specified when the first argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_spec2xsd(stats_spec, xsd_elem):
+            """Internal use for xsd_handler. Reads stats_spec
+            specified as first arguments, and modify the xml object
+            specified as second argument. xsd_elem must be
+            modified. Always returns None with no exceptions."""
+            # assumed module_spec or one stats_spec
+            if type(stats_spec) is dict:
+                # assumed module_spec
+                if 'item_name' not in stats_spec:
+                    for mod in stats_spec.keys():
+                        elem = xml.etree.ElementTree.Element(
+                            "element", { "name" : mod })
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        alltag = xml.etree.ElementTree.Element("all")
+                        stats_spec2xsd(stats_spec[mod], alltag)
+                        complextype.append(alltag)
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                # assumed stats_spec
+                else:
+                    if stats_spec['item_type'] == 'map':
+                        alltag = xml.etree.ElementTree.Element("all")
+                        stats_spec2xsd(stats_spec['map_item_spec'], alltag)
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        complextype.append(alltag)
+                        elem = xml.etree.ElementTree.Element(
+                            "element", attrib={ "name" : stats_spec["item_name"],
+                                                "minOccurs": "0" \
+                                                    if stats_spec["item_optional"] \
+                                                    else "1",
+                                                "maxOccurs": "unbounded" })
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                    elif stats_spec['item_type'] == 'list':
+                        alltag = xml.etree.ElementTree.Element("sequence")
+                        stats_spec2xsd(stats_spec['list_item_spec'], alltag)
+                        complextype = xml.etree.ElementTree.Element("complexType")
+                        complextype.append(alltag)
+                        elem = xml.etree.ElementTree.Element(
+                            "element", attrib={ "name" : stats_spec["item_name"],
+                                                "minOccurs": "0" \
+                                                    if stats_spec["item_optional"] \
+                                                    else "1",
+                                                "maxOccurs": "1" })
+                        elem.append(complextype)
+                        xsd_elem.append(elem)
+                    else:
+                        # determine the datatype of XSD
+                        # TODO: Should consider other item_format types
+                        datatype = stats_spec["item_type"] \
+                            if stats_spec["item_type"].lower() != 'real' \
+                            else 'float'
+                        if "item_format" in stats_spec:
+                            item_format = stats_spec["item_format"]
+                            if datatype.lower() == 'string' \
+                                    and item_format.lower() == 'date-time':
+                                 datatype = 'dateTime'
+                            elif datatype.lower() == 'string' \
+                                    and (item_format.lower() == 'date' \
+                                             or item_format.lower() == 'time'):
+                                 datatype = item_format.lower()
+                        elem = xml.etree.ElementTree.Element(
+                            "element",
+                            attrib={
+                                'name' : stats_spec["item_name"],
+                                'type' : datatype,
+                                'minOccurs' : "0" \
+                                    if stats_spec["item_optional"] \
+                                    else "1",
+                                'maxOccurs' : "1"
+                                }
+                            )
+                        annotation = xml.etree.ElementTree.Element("annotation")
+                        appinfo = xml.etree.ElementTree.Element("appinfo")
+                        documentation = xml.etree.ElementTree.Element("documentation")
+                        if "item_title" in stats_spec:
+                            appinfo.text = stats_spec["item_title"]
+                        if "item_description" in stats_spec:
+                            documentation.text = stats_spec["item_description"]
+                        annotation.append(appinfo)
+                        annotation.append(documentation)
+                        elem.append(annotation)
+                        xsd_elem.append(elem)
+            # multiple stats_specs
+            elif type(stats_spec) is list:
+                for item_spec in stats_spec:
+                    stats_spec2xsd(item_spec, xsd_elem)
+
         # for XSD
-        xsd_root = xml.etree.ElementTree.Element("all") # started with "all" tag
-        for (mod, spec) in self.get_stats_spec().items():
-            if not spec: continue
-            alltag = xml.etree.ElementTree.Element("all")
-            for item in spec:
-                element = xml.etree.ElementTree.Element(
-                    "element",
-                    dict( name=item["item_name"],
-                          type=item["item_type"] if item["item_type"].lower() != 'real' else 'float',
-                          minOccurs="1",
-                          maxOccurs="1" ),
-                    )
-                annotation = xml.etree.ElementTree.Element("annotation")
-                appinfo = xml.etree.ElementTree.Element("appinfo")
-                documentation = xml.etree.ElementTree.Element("documentation")
-                appinfo.text = item["item_title"]
-                documentation.text = item["item_description"]
-                annotation.append(appinfo)
-                annotation.append(documentation)
-                element.append(annotation)
-                alltag.append(element)
-
-            complextype = xml.etree.ElementTree.Element("complexType")
-            complextype.append(alltag)
-            mod_element = xml.etree.ElementTree.Element("element", { "name" : mod })
-            mod_element.append(complextype)
-            xsd_root.append(mod_element)
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        alltag = xml.etree.ElementTree.Element("all")
+        stats_spec2xsd(stats_spec, alltag)
+        complextype = xml.etree.ElementTree.Element("complexType")
+        complextype.append(alltag)
+        documentation = xml.etree.ElementTree.Element("documentation")
+        documentation.text = "A set of statistics data"
+        annotation = xml.etree.ElementTree.Element("annotation")
+        annotation.append(documentation)
+        elem = xml.etree.ElementTree.Element(
+            "element", attrib={ 'name' : 'statistics' })
+        elem.append(annotation)
+        elem.append(complextype)
+        documentation = xml.etree.ElementTree.Element("documentation")
+        documentation.text = "XML schema of the statistics data in BIND 10"
+        annotation = xml.etree.ElementTree.Element("annotation")
+        annotation.append(documentation)
+        xsd_root = xml.etree.ElementTree.Element(
+            "schema",
+            attrib={ 'xmlns' : "http://www.w3.org/2001/XMLSchema",
+                     'targetNamespace' : XSD_NAMESPACE,
+                     'xmlns:bind10' : XSD_NAMESPACE })
+        xsd_root.append(annotation)
+        xsd_root.append(elem)
         # The coding conversion is tricky. xml..tostring() of Python 3.2
         # returns bytes (not string) regardless of the coding, while
         # tostring() of Python 3.1 returns a string.  To support both
         # cases transparently, we first make sure tostring() returns
         # bytes by specifying utf-8 and then convert the result to a
         # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
         xsd_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
                          encoding='us-ascii')
         self.xsd_body = self.open_template(XSD_TEMPLATE_LOCATION).substitute(
-            xsd_string=xsd_string,
-            xsd_namespace=XSD_NAMESPACE
-            )
+            xsd_string=xsd_string)
         assert self.xsd_body is not None
         return self.xsd_body
 
-    def xsl_handler(self):
-        """Handler which just returns the body of XSL document"""
+    def xsl_handler(self, module_name=None, item_name=None):
+        """Requests the specified statistics specification by using
+        the function get_stats_spec respectively and loads the XSL
+        template file and returns the string of the XSL document. The
+        first argument is the module name which owns the statistics
+        data, the second argument is one name of the statistics items
+        which the module owns. The second argument cannot be
+        specified when the first argument is not specified."""
+
+        # TODO: Separate the following recursive function by type of
+        # the parameter. Because we should be sure what type there is
+        # when we call it recursively.
+        def stats_spec2xsl(stats_spec, xsl_elem, path=XML_URL_PATH):
+            """Internal use for xsl_handler. Reads stats_spec
+            specified as first arguments, and modify the xml object
+            specified as second argument. xsl_elem must be
+            modified. The third argument is a base path used for
+            making anchor tag in XSL. Always returns None with no
+            exceptions."""
+            # assumed module_spec or one stats_spec
+            if type(stats_spec) is dict:
+                # assumed module_spec
+                if 'item_name' not in stats_spec:
+                    table = xml.etree.ElementTree.Element("table")
+                    tr = xml.etree.ElementTree.Element("tr")
+                    th = xml.etree.ElementTree.Element("th")
+                    th.text = "Module Name"
+                    tr.append(th)
+                    th = xml.etree.ElementTree.Element("th")
+                    th.text = "Module Item"
+                    tr.append(th)
+                    table.append(tr)
+                    for mod in stats_spec.keys():
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : mod })
+                        tr = xml.etree.ElementTree.Element("tr")
+                        td = xml.etree.ElementTree.Element("td")
+                        a = xml.etree.ElementTree.Element(
+                            "a", attrib={ "href": urllib.parse.quote(path + "/" + mod) })
+                        a.text = mod
+                        td.append(a)
+                        tr.append(td)
+                        td = xml.etree.ElementTree.Element("td")
+                        stats_spec2xsl(stats_spec[mod], td,
+                                       path + "/" + mod)
+                        tr.append(td)
+                        foreach.append(tr)
+                        table.append(foreach)
+                    xsl_elem.append(table)
+                # assumed stats_spec
+                else:
+                    if stats_spec['item_type'] == 'map':
+                        table = xml.etree.ElementTree.Element("table")
+                        tr = xml.etree.ElementTree.Element("tr")
+                        th = xml.etree.ElementTree.Element("th")
+                        th.text = "Item Name"
+                        tr.append(th)
+                        th = xml.etree.ElementTree.Element("th")
+                        th.text = "Item Value"
+                        tr.append(th)
+                        table.append(tr)
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : stats_spec['item_name'] })
+                        tr = xml.etree.ElementTree.Element("tr")
+                        td = xml.etree.ElementTree.Element(
+                            "td",
+                            attrib={ "class" : "title",
+                                     "title" : stats_spec["item_description"] \
+                                         if "item_description" in stats_spec \
+                                         else "" })
+                        # TODO: Consider whether we should always use
+                        # the identical name "item_name" for the
+                        # user-visible name in XSL.
+                        td.text = stats_spec[ "item_title" if "item_title" in stats_spec else "item_name" ]
+                        tr.append(td)
+                        td = xml.etree.ElementTree.Element("td")
+                        stats_spec2xsl(stats_spec['map_item_spec'], td,
+                                       path + "/" + stats_spec["item_name"])
+                        tr.append(td)
+                        foreach.append(tr)
+                        table.append(foreach)
+                        xsl_elem.append(table)
+                    elif stats_spec['item_type'] == 'list':
+                        stats_spec2xsl(stats_spec['list_item_spec'], xsl_elem,
+                                       path + "/" + stats_spec["item_name"])
+                    else:
+                        xsl_valueof = xml.etree.ElementTree.Element(
+                            "xsl:value-of",
+                            attrib={'select': stats_spec["item_name"]})
+                        xsl_elem.append(xsl_valueof)
+
+            # multiple stats_specs
+            elif type(stats_spec) is list:
+                table = xml.etree.ElementTree.Element("table")
+                tr = xml.etree.ElementTree.Element("tr")
+                th = xml.etree.ElementTree.Element("th")
+                th.text = "Item Name"
+                tr.append(th)
+                th = xml.etree.ElementTree.Element("th")
+                th.text = "Item Value"
+                tr.append(th)
+                table.append(tr)
+                for item_spec in stats_spec:
+                    tr = xml.etree.ElementTree.Element("tr")
+                    td = xml.etree.ElementTree.Element(
+                        "td",
+                        attrib={ "class" : "title",
+                                 "title" : item_spec["item_description"] \
+                                     if "item_description" in item_spec \
+                                     else "" })
+                    # if the path length is equal to or shorter than
+                    # XML_URL_PATH + /Module/Item, add the anchor tag.
+                    if len(path.split('/')) <= len((XML_URL_PATH + '/Module/Item').split('/')):
+                        a = xml.etree.ElementTree.Element(
+                            "a", attrib={ "href": urllib.parse.quote(path + "/" + item_spec["item_name"]) })
+                        a.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+                        td.append(a)
+                    else:
+                        td.text = item_spec[ "item_title" if "item_title" in item_spec else "item_name" ]
+                    tr.append(td)
+                    td = xml.etree.ElementTree.Element("td")
+                    stats_spec2xsl(item_spec, td, path)
+                    tr.append(td)
+                    if item_spec['item_type'] == 'list':
+                        foreach = xml.etree.ElementTree.Element(
+                            "xsl:for-each", attrib={ "select" : item_spec['item_name'] })
+                        foreach.append(tr)
+                        table.append(foreach)
+                    else:
+                        table.append(tr)
+                xsl_elem.append(table)
+
         # for XSL
-        xsd_root = xml.etree.ElementTree.Element(
+        stats_spec = self.get_stats_spec(module_name, item_name)
+        xsd_root = xml.etree.ElementTree.Element( # started with xml:template tag
             "xsl:template",
-            dict(match="*")) # started with xml:template tag
-        for (mod, spec) in self.get_stats_spec().items():
-            if not spec: continue
-            for item in spec:
-                tr = xml.etree.ElementTree.Element("tr")
-                td0 = xml.etree.ElementTree.Element("td")
-                td0.text = str(mod)
-                td1 = xml.etree.ElementTree.Element(
-                    "td", { "class" : "title",
-                            "title" : item["item_description"] })
-                td1.text = item["item_title"]
-                td2 = xml.etree.ElementTree.Element("td")
-                xsl_valueof = xml.etree.ElementTree.Element(
-                    "xsl:value-of",
-                    dict(select=mod+'/'+item["item_name"]))
-                td2.append(xsl_valueof)
-                tr.append(td0)
-                tr.append(td1)
-                tr.append(td2)
-                xsd_root.append(tr)
+            attrib={'match': "bind10:statistics"})
+        stats_spec2xsl(stats_spec, xsd_root)
         # The coding conversion is tricky. xml..tostring() of Python 3.2
         # returns bytes (not string) regardless of the coding, while
         # tostring() of Python 3.1 returns a string.  To support both
         # cases transparently, we first make sure tostring() returns
         # bytes by specifying utf-8 and then convert the result to a
         # plain string (code below assume it).
+        # FIXME: Non-ASCII characters might be lost here. Consider how
+        # the whole system should handle non-ASCII characters.
         xsl_string = str(xml.etree.ElementTree.tostring(xsd_root, encoding='utf-8'),
                          encoding='us-ascii')
         self.xsl_body = self.open_template(XSL_TEMPLATE_LOCATION).substitute(
diff --git a/src/bin/stats/stats_httpd_messages.mes b/src/bin/stats/stats_httpd_messages.mes
index 0e984dc..dbd0650 100644
--- a/src/bin/stats/stats_httpd_messages.mes
+++ b/src/bin/stats/stats_httpd_messages.mes
@@ -55,6 +55,12 @@ response will be sent back, and the specific error is printed. This
 is an error condition that likely points to a module that is not
 responding correctly to statistic requests.
 
+% STATHTTPD_SERVER_DATAERROR HTTP server data error: %1
+An internal error occurred while handling an HTTP request. An HTTP 404
+response will be sent back, and the specific error is printed. This
+is an error condition that likely indicates that the specified data
+corresponding to the requested URI is incorrect.
+
 % STATHTTPD_SERVER_INIT_ERROR HTTP server initialization error: %1
 There was a problem initializing the HTTP server in the stats-httpd
 module upon receiving its configuration data. The most likely cause
diff --git a/src/bin/stats/tests/Makefile.am b/src/bin/stats/tests/Makefile.am
index afd572f..01254d4 100644
--- a/src/bin/stats/tests/Makefile.am
+++ b/src/bin/stats/tests/Makefile.am
@@ -1,7 +1,7 @@
 PYCOVERAGE_RUN = @PYCOVERAGE_RUN@
 PYTESTS = b10-stats_test.py b10-stats-httpd_test.py
 EXTRA_DIST = $(PYTESTS) test_utils.py
-CLEANFILES = test_utils.pyc msgq_socket_test
+CLEANFILES = test_utils.pyc
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
diff --git a/src/bin/stats/tests/b10-stats-httpd_test.py b/src/bin/stats/tests/b10-stats-httpd_test.py
index e867080..b6847bd 100644
--- a/src/bin/stats/tests/b10-stats-httpd_test.py
+++ b/src/bin/stats/tests/b10-stats-httpd_test.py
@@ -45,7 +45,12 @@ DUMMY_DATA = {
         },
     'Auth' : {
         "queries.tcp": 2,
-        "queries.udp": 3
+        "queries.udp": 3,
+        "queries.perzone": [{
+                "zonename": "test.example",
+                "queries.tcp": 2,
+                "queries.udp": 3
+                }]
         },
     'Stats' : {
         "report_time": "2011-03-04T11:59:19Z",
@@ -129,68 +134,295 @@ class TestHttpHandler(unittest.TestCase):
         self.assertEqual(len(self.stats_httpd.httpd), 1)
         self.assertEqual((self.address, self.port), self.stats_httpd.http_addrs[0])
 
-        # URL is '/bind10/statistics/xml'
-        self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
-        self.client.endheaders()
-        response = self.client.getresponse()
-        self.assertEqual(response.getheader("Content-type"), "text/xml")
-        self.assertTrue(int(response.getheader("Content-Length")) > 0)
-        self.assertEqual(response.status, 200)
-        root = xml.etree.ElementTree.parse(response).getroot()
-        self.assertTrue(root.tag.find('stats_data') > 0)
-        for (k,v) in root.attrib.items():
-            if k.find('schemaLocation') > 0:
-                self.assertEqual(v, stats_httpd.XSD_NAMESPACE + ' ' + stats_httpd.XSD_URL_PATH)
-        for mod in DUMMY_DATA:
-            for (item, value) in DUMMY_DATA[mod].items():
+        def check_XML_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XML_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            xml_doctype = response.readline().decode()
+            xsl_doctype = response.readline().decode()
+            self.assertTrue(len(xml_doctype) > 0)
+            self.assertTrue(len(xsl_doctype) > 0)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            self.assertTrue(root.tag.find('statistics') > 0)
+            schema_loc = '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation'
+            if item is None and mod is None:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith(
+                        '<?xml-stylesheet type="text/xsl" href="' + 
+                        stats_httpd.XSL_URL_PATH
+                        + '"?>'))
+                for m in DUMMY_DATA:
+                    for k in DUMMY_DATA[m].keys():
+                        self.assertIsNotNone(root.find(m + '/' + k))
+                        itm = root.find(m + '/' + k)
+                        if type(DUMMY_DATA[m][k]) is list:
+                            for v in DUMMY_DATA[m][k]:
+                                for i in v:
+                                    self.assertIsNotNone(itm.find('zones/' + i))
+            elif item is None:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH + '/' + mod)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith( 
+                                 '<?xml-stylesheet type="text/xsl" href="'
+                                 + stats_httpd.XSL_URL_PATH + '/' + mod
+                                 + '"?>'))
+                for k in DUMMY_DATA[mod].keys():
+                    self.assertIsNotNone(root.find(mod + '/' + k))
+                    itm = root.find(mod + '/' + k)
+                    self.assertIsNotNone(itm)
+                    if type(DUMMY_DATA[mod][k]) is list:
+                        for v in DUMMY_DATA[mod][k]:
+                            for i in v:
+                                self.assertIsNotNone(itm.find('zones/' + i))
+            else:
+                # check the path of XSD
+                self.assertEqual(root.attrib[schema_loc],
+                                 stats_httpd.XSD_NAMESPACE + ' '
+                                 + stats_httpd.XSD_URL_PATH + '/' + mod + '/' + item)
+                # check the path of XSL
+                self.assertTrue(xsl_doctype.startswith( 
+                                 '<?xml-stylesheet type="text/xsl" href="'
+                                 + stats_httpd.XSL_URL_PATH + '/' + mod + '/' + item
+                                 + '"?>'))
                 self.assertIsNotNone(root.find(mod + '/' + item))
 
-        # URL is '/bind10/statitics/xsd'
-        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
-        self.client.endheaders()
-        response = self.client.getresponse()
-        self.assertEqual(response.getheader("Content-type"), "text/xml")
-        self.assertTrue(int(response.getheader("Content-Length")) > 0)
-        self.assertEqual(response.status, 200)
-        root = xml.etree.ElementTree.parse(response).getroot()
-        url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
-        tags = [ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ]
-        xsdpath = '/'.join(tags)
-        self.assertTrue(root.tag.find('schema') > 0)
-        self.assertTrue(hasattr(root, 'attrib'))
-        self.assertTrue('targetNamespace' in root.attrib)
-        self.assertEqual(root.attrib['targetNamespace'],
-                         stats_httpd.XSD_NAMESPACE)
-        for elm in root.findall(xsdpath):
-            self.assertIsNotNone(elm.attrib['name'])
-            self.assertTrue(elm.attrib['name'] in DUMMY_DATA)
-
-        # URL is '/bind10/statitics/xsl'
-        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
-        self.client.endheaders()
-        response = self.client.getresponse()
-        self.assertEqual(response.getheader("Content-type"), "text/xml")
-        self.assertTrue(int(response.getheader("Content-Length")) > 0)
-        self.assertEqual(response.status, 200)
-        root = xml.etree.ElementTree.parse(response).getroot()
-        url_trans = '{http://www.w3.org/1999/XSL/Transform}'
-        url_xhtml = '{http://www.w3.org/1999/xhtml}'
-        xslpath = url_trans + 'template/' + url_xhtml + 'tr'
-        self.assertEqual(root.tag, url_trans + 'stylesheet')
-        for tr in root.findall(xslpath):
-            tds = tr.findall(url_xhtml + 'td')
-            self.assertIsNotNone(tds)
-            self.assertEqual(type(tds), list)
-            self.assertTrue(len(tds) > 2)
-            self.assertTrue(hasattr(tds[0], 'text'))
-            self.assertTrue(tds[0].text in DUMMY_DATA)
-            valueof = tds[2].find(url_trans + 'value-of')
-            self.assertIsNotNone(valueof)
-            self.assertTrue(hasattr(valueof, 'attrib'))
-            self.assertIsNotNone(valueof.attrib)
-            self.assertTrue('select' in valueof.attrib)
-            self.assertTrue(valueof.attrib['select'] in \
-                                [ tds[0].text+'/'+item for item in DUMMY_DATA[tds[0].text].keys() ])
+        # URL is '/bind10/statistics/xml'
+        check_XML_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xml/Module'
+            check_XML_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xml/Module/Item'
+                check_XML_URL_PATH(mod=m, item=k)
+
+        def check_XSD_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XSD_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            url_xmlschema = '{http://www.w3.org/2001/XMLSchema}'
+            self.assertTrue(root.tag.find('schema') > 0)
+            self.assertTrue(hasattr(root, 'attrib'))
+            self.assertTrue('targetNamespace' in root.attrib)
+            self.assertEqual(root.attrib['targetNamespace'],
+                             stats_httpd.XSD_NAMESPACE)
+            if mod is None and item is None:
+                for (mod, itm) in DUMMY_DATA.items():
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                    mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                    self.assertTrue(mod in mod_elm)
+                    for (it, val) in itm.items():
+                        xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                        itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                        self.assertTrue(it in itm_elm)
+                        if type(val) is list:
+                            xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                            itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+                            self.assertTrue('zones' in itm_elm2)
+                            for i in val:
+                                for k in i.keys():
+                                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                                    self.assertTrue(
+                                        k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+            elif item is None:
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                self.assertTrue(mod in mod_elm)
+                for (it, val) in DUMMY_DATA[mod].items():
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                    itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                    self.assertTrue(it in itm_elm)
+                    if type(val) is list:
+                        xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                        itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[it].findall(xsdpath) ])
+                        self.assertTrue('zones' in itm_elm2)
+                        for i in val:
+                            for k in i.keys():
+                                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                                self.assertTrue(
+                                    k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+            else:
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'element', 'complexType', 'all', 'element' ] ])
+                mod_elm = dict([ (elm.attrib['name'], elm) for elm in root.findall(xsdpath) ])
+                self.assertTrue(mod in mod_elm)
+                xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                itm_elm = dict([ (elm.attrib['name'], elm) for elm in mod_elm[mod].findall(xsdpath) ])
+                self.assertTrue(item in itm_elm)
+                if type(DUMMY_DATA[mod][item]) is list:
+                    xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'sequence', 'element' ] ])
+                    itm_elm2 = dict([ (elm.attrib['name'], elm) for elm in itm_elm[item].findall(xsdpath) ])
+                    self.assertTrue('zones' in itm_elm2)
+                    for i in DUMMY_DATA[mod][item]:
+                        for k in i.keys():
+                            xsdpath = '/'.join([ url_xmlschema + t for t in [ 'complexType', 'all', 'element' ] ])
+                            self.assertTrue(
+                                k in [ elm.attrib['name'] for elm in itm_elm2['zones'].findall(xsdpath) ])
+
+        # URL is '/bind10/statistics/xsd'
+        check_XSD_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xsd/Module'
+            check_XSD_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xsd/Module/Item'
+                check_XSD_URL_PATH(mod=m, item=k)
+
+        def check_XSL_URL_PATH(mod=None, item=None):
+            url_path = stats_httpd.XSL_URL_PATH
+            if mod is not None:
+                url_path = url_path + '/' + mod
+                if item is not None:
+                    url_path = url_path + '/' + item
+            self.client.putrequest('GET', url_path)
+            self.client.endheaders()
+            response = self.client.getresponse()
+            self.assertEqual(response.getheader("Content-type"), "text/xml")
+            self.assertTrue(int(response.getheader("Content-Length")) > 0)
+            self.assertEqual(response.status, 200)
+            root = xml.etree.ElementTree.parse(response).getroot()
+            url_trans = '{http://www.w3.org/1999/XSL/Transform}'
+            url_xhtml = '{http://www.w3.org/1999/xhtml}'
+            self.assertEqual(root.tag, url_trans + 'stylesheet')
+            if item is None and mod is None:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                for (mod, itms) in DUMMY_DATA.items():
+                    self.assertTrue(mod in mod_fe)
+                    for (k, v) in itms.items():
+                        if type(v) is list:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_trans + 'for-each'
+                            itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                            self.assertTrue(k in itm_fe)
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'a'
+                            itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+                            self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+                            for itms in v:
+                                xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                    + url_xhtml + 'table/' + url_trans + 'for-each'
+                                itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+                                self.assertTrue('zones' in itm_fe)
+                                for (k, v) in itms.items():
+                                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                        + url_xhtml + 'td/' + url_trans + 'value-of'
+                                    itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                                    self.assertTrue(k in itm_vo)
+                        else:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_trans + 'value-of'
+                            itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                            self.assertTrue(k in itm_vo)
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_xhtml + 'a'
+                            itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                            self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+            elif item is None:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                self.assertTrue(mod in mod_fe)
+                for (k, v) in DUMMY_DATA[mod].items():
+                    if type(v) is list:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_trans + 'for-each'
+                        itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                        self.assertTrue(k in itm_fe)
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'a'
+                        itm_a = [ x.attrib['href'] for x in itm_fe[k].findall(xslpath) ]
+                        self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+                        for itms in v:
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_trans + 'for-each'
+                            itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[k].findall(xslpath) ])
+                            self.assertTrue('zones' in itm_fe)
+                            for (k, v) in itms.items():
+                                xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                    + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                    + url_xhtml + 'td/' + url_trans + 'value-of'
+                                itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                                self.assertTrue(k in itm_vo)
+                    else:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                            + url_xhtml + 'td/' + url_trans + 'value-of'
+                        itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                        self.assertTrue(k in itm_vo)
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                            + url_xhtml + 'td/' + url_xhtml + 'a'
+                        itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                        self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + k in itm_a)
+            else:
+                xslpath = url_trans + 'template/' + url_xhtml + 'table/' + url_trans + 'for-each'
+                mod_fe = dict([ (x.attrib['select'], x) for x in root.findall(xslpath) ])
+                self.assertTrue(mod in mod_fe)
+                if type(DUMMY_DATA[mod][item]) is list:
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_trans + 'for-each'
+                    itm_fe = dict([ (x.attrib['select'], x) for x in mod_fe[mod].findall(xslpath) ])
+                    self.assertTrue(item in itm_fe)
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'a'
+                    itm_a = [ x.attrib['href'] for x in itm_fe[item].findall(xslpath) ]
+                    self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+                    for itms in DUMMY_DATA[mod][item]:
+                        xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                            + url_xhtml + 'table/' + url_trans + 'for-each'
+                        itm_fe = dict([ (x.attrib['select'], x) for x in itm_fe[item].findall(xslpath) ])
+                        self.assertTrue('zones' in itm_fe)
+                        for (k, v) in itms.items():
+                            xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                                + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                                + url_xhtml + 'td/' + url_trans + 'value-of'
+                            itm_vo = [ x.attrib['select'] for x in itm_fe['zones'].findall(xslpath) ]
+                            self.assertTrue(k in itm_vo)
+                else:
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                        + url_xhtml + 'td/' + url_trans + 'value-of'
+                    itm_vo = [ x.attrib['select'] for x in mod_fe[mod].findall(xslpath) ]
+                    self.assertTrue(item in itm_vo)
+                    xslpath = url_xhtml + 'tr/' + url_xhtml + 'td/' \
+                        + url_xhtml + 'table/' + url_xhtml + 'tr/' \
+                        + url_xhtml + 'td/' + url_xhtml + 'a'
+                    itm_a = [ x.attrib['href'] for x in mod_fe[mod].findall(xslpath) ]
+                    self.assertTrue(stats_httpd.XML_URL_PATH + '/' + mod + '/' + item in itm_a)
+
+        # URL is '/bind10/statistics/xsl'
+        check_XSL_URL_PATH(mod=None, item=None)
+        for m in DUMMY_DATA:
+            # URL is '/bind10/statistics/xsl/Module'
+            check_XSL_URL_PATH(mod=m)
+            for k in DUMMY_DATA[m].keys():
+                # URL is '/bind10/statistics/xsl/Module/Item'
+                check_XSL_URL_PATH(mod=m, item=k)
 
         # 302 redirect
         self.client._http_vsn_str = 'HTTP/1.1'
@@ -202,13 +434,102 @@ class TestHttpHandler(unittest.TestCase):
         self.assertEqual(response.getheader('Location'),
                          "http://%s:%d%s" % (self.address, self.port, stats_httpd.XML_URL_PATH))
 
-        # 404 NotFound
+        # 404 NotFound (random path)
         self.client._http_vsn_str = 'HTTP/1.0'
         self.client.putrequest('GET', '/path/to/foo/bar')
         self.client.endheaders()
         response = self.client.getresponse()
         self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', '/bind10/foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', '/bind10/statistics/foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + 'Auth') # with no slash
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 200 ok
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '#foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '?foo=bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 200)
+
+        # 404 NotFound (too long path)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Boss/boot_time/a')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 404 NotFound (nonexistent module name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+
+        # 404 NotFound (nonexistent item name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Foo/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
 
+        # 404 NotFound (existent module but nonexistent item name)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XML_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSD_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
+        self.client._http_vsn_str = 'HTTP/1.0'
+        self.client.putrequest('GET', stats_httpd.XSL_URL_PATH + '/Auth/bar')
+        self.client.endheaders()
+        response = self.client.getresponse()
+        self.assertEqual(response.status, 404)
 
     def test_do_GET_failed1(self):
         # checks status
@@ -242,26 +563,26 @@ class TestHttpHandler(unittest.TestCase):
         # failure case(Stats replies an error)
         self.stats.mccs.set_command_handler(
             lambda cmd, args: \
-                isc.config.ccsession.create_answer(1, "I have an error.")
+                isc.config.ccsession.create_answer(1, "specified arguments are incorrect: I have an error.")
             )
 
         # request XML
         self.client.putrequest('GET', stats_httpd.XML_URL_PATH)
         self.client.endheaders()
         response = self.client.getresponse()
-        self.assertEqual(response.status, 500)
+        self.assertEqual(response.status, 404)
 
         # request XSD
         self.client.putrequest('GET', stats_httpd.XSD_URL_PATH)
         self.client.endheaders()
         response = self.client.getresponse()
-        self.assertEqual(response.status, 500)
+        self.assertEqual(response.status, 404)
 
         # request XSL
         self.client.putrequest('GET', stats_httpd.XSL_URL_PATH)
         self.client.endheaders()
         response = self.client.getresponse()
-        self.assertEqual(response.status, 500)
+        self.assertEqual(response.status, 404)
 
     def test_do_HEAD(self):
         self.client.putrequest('HEAD', stats_httpd.XML_URL_PATH)
@@ -306,12 +627,18 @@ class TestHttpServer(unittest.TestCase):
 class TestStatsHttpdError(unittest.TestCase):
     """Tests for StatsHttpdError exception"""
 
-    def test_raises(self):
+    def test_raises1(self):
         try:
             raise stats_httpd.StatsHttpdError('Nothing')
         except stats_httpd.StatsHttpdError as err:
             self.assertEqual(str(err), 'Nothing')
 
+    def test_raises2(self):
+        try:
+            raise stats_httpd.StatsHttpdDataError('Nothing')
+        except stats_httpd.StatsHttpdDataError as err:
+            self.assertEqual(str(err), 'Nothing')
+
 class TestStatsHttpd(unittest.TestCase):
     """Tests for StatsHttpd class"""
 
@@ -488,17 +815,13 @@ class TestStatsHttpd(unittest.TestCase):
         self.assertTrue(isinstance(tmpl, string.Template))
         opts = dict(
             xml_string="<dummy></dummy>",
-            xsd_namespace="http://host/path/to/",
-            xsd_url_path="/path/to/",
             xsl_url_path="/path/to/")
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertTrue(lines.find(opts[n])>0)
         tmpl = self.stats_httpd.open_template(stats_httpd.XSD_TEMPLATE_LOCATION)
         self.assertTrue(isinstance(tmpl, string.Template))
-        opts = dict(
-            xsd_string="<dummy></dummy>",
-            xsd_namespace="http://host/path/to/")
+        opts = dict(xsd_string="<dummy></dummy>")
         lines = tmpl.substitute(opts)
         for n in opts:
             self.assertTrue(lines.find(opts[n])>0)
@@ -580,26 +903,172 @@ class TestStatsHttpd(unittest.TestCase):
 
     def test_xml_handler(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
-        self.stats_httpd.get_stats_data = lambda: \
-            { 'Dummy' : { 'foo':'bar' } }
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "foo",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "bar",
+                        "item_description": "foo is bar",
+                        "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        self.stats_httpd.get_stats_data = lambda x,y: \
+            { 'Dummy' : { 'foo':'bar',
+                          'foo2': [
+                            {
+                                "foo2-1-1" : "bar1",
+                                "foo2-1-2" : 10,
+                                "foo2-1-3" : 9
+                                },
+                            {
+                                "foo2-1-1" : "bar2",
+                                "foo2-1-2" : 8,
+                                "foo2-1-3" : 7
+                                }
+                            ] } }
         xml_body1 = self.stats_httpd.open_template(
             stats_httpd.XML_TEMPLATE_LOCATION).substitute(
-            xml_string='<Dummy><foo>bar</foo></Dummy>',
-            xsd_namespace=stats_httpd.XSD_NAMESPACE,
-            xsd_url_path=stats_httpd.XSD_URL_PATH,
+            xml_string='<bind10:statistics xmlns:bind10="http://bind10.isc.org/bind10" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://bind10.isc.org/bind10 ' + stats_httpd.XSD_URL_PATH + '"><Dummy><foo>bar</foo><foo2><foo2-1><foo2-1-1>bar1</foo2-1-1><foo2-1-2>10</foo2-1-2><foo2-1-3>9</foo2-1-3></foo2-1><foo2-1><foo2-1-1>bar2</foo2-1-1><foo2-1-2>8</foo2-1-2><foo2-1-3>7</foo2-1-3></foo2-1></foo2></Dummy></bind10:statistics>',
             xsl_url_path=stats_httpd.XSL_URL_PATH)
         xml_body2 = self.stats_httpd.xml_handler()
         self.assertEqual(type(xml_body1), str)
         self.assertEqual(type(xml_body2), str)
         self.assertEqual(xml_body1, xml_body2)
-        self.stats_httpd.get_stats_data = lambda: \
-            { 'Dummy' : {'bar':'foo'} }
+        self.stats_httpd.get_stats_spec = lambda x,y: \
+            { "Dummy" :
+                  [{
+                        "item_name": "bar",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "foo",
+                        "item_description": "bar foo",
+                        "item_title": "Bar"
+                        },
+                   {
+                        "item_name": "bar2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Bar foo",
+                        "item_description": "Bar foo",
+                        "list_item_spec": {
+                            "item_name": "bar2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "bar2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Bar2 1 1",
+                                    "item_description": "Bar foo"
+                                    },
+                                {
+                                    "item_name": "bar2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Bar2 1 2",
+                                    "item_description": "Bar foo"
+                                    },
+                                {
+                                    "item_name": "bar2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Bar2 1 3",
+                                    "item_description": "Bar foo"
+                                    }
+                                ]
+                            }
+                        }]
+              }
+        self.stats_httpd.get_stats_data = lambda x,y: \
+            { 'Dummy' : { 'bar':'foo',
+                          'bar2': [
+                            {
+                                "bar2-1-1" : "foo1",
+                                "bar2-1-2" : 10,
+                                "bar2-1-3" : 9
+                                },
+                            {
+                                "bar2-1-1" : "foo2",
+                                "bar2-1-2" : 8,
+                                "bar2-1-3" : 7
+                                }
+                            ] } }
         xml_body2 = self.stats_httpd.xml_handler()
         self.assertNotEqual(xml_body1, xml_body2)
 
     def test_xsd_handler(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
-        self.stats_httpd.get_stats_spec = lambda: \
+        self.stats_httpd.get_stats_spec = lambda x,y: \
             { "Dummy" :
                   [{
                         "item_name": "foo",
@@ -608,23 +1077,76 @@ class TestStatsHttpd(unittest.TestCase):
                         "item_default": "bar",
                         "item_description": "foo is bar",
                         "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "hoo_time",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "2011-01-01T01:01:01Z",
+                        "item_description": "hoo time",
+                        "item_title": "Hoo Time",
+                        "item_format": "date-time"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
                         }]
               }
         xsd_body1 = self.stats_httpd.open_template(
             stats_httpd.XSD_TEMPLATE_LOCATION).substitute(
-            xsd_string=\
-                '<all><element name="Dummy"><complexType><all>' \
-                + '<element maxOccurs="1" minOccurs="1" name="foo" type="string">' \
-                + '<annotation><appinfo>Foo</appinfo>' \
-                + '<documentation>foo is bar</documentation>' \
-                + '</annotation></element></all>' \
-                + '</complexType></element></all>',
-            xsd_namespace=stats_httpd.XSD_NAMESPACE)
+            xsd_string='<schema targetNamespace="' + stats_httpd.XSD_NAMESPACE + '" xmlns="http://www.w3.org/2001/XMLSchema" xmlns:bind10="' + stats_httpd.XSD_NAMESPACE + '"><annotation><documentation>XML schema of the statistics data in BIND 10</documentation></annotation><element name="statistics"><annotation><documentation>A set of statistics data</documentation></annotation><complexType><all><element name="Dummy"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo" type="string"><annotation><appinfo>Foo</appinfo><documentation>foo is bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="hoo_time" type="dateTime"><annotation><appinfo>Hoo Time</appinfo><documentation>hoo time</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2"><complexType><sequence><element maxOccurs="unbounded" minOccurs="1" name="foo2-1"><complexType><all><element maxOccurs="1" minOccurs="1" name="foo2-1-1" type="string"><ann
 otation><appinfo>Foo2 1 1</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-2" type="integer"><annotation><appinfo>Foo2 1 2</appinfo><documentation>Foo bar</documentation></annotation></element><element maxOccurs="1" minOccurs="1" name="foo2-1-3" type="integer"><annotation><appinfo>Foo2 1 3</appinfo><documentation>Foo bar</documentation></annotation></element></all></complexType></element></sequence></complexType></element></all></complexType></element></all></complexType></element></schema>')
         xsd_body2 = self.stats_httpd.xsd_handler()
         self.assertEqual(type(xsd_body1), str)
         self.assertEqual(type(xsd_body2), str)
         self.assertEqual(xsd_body1, xsd_body2)
-        self.stats_httpd.get_stats_spec = lambda: \
+        self.stats_httpd.get_stats_spec = lambda x,y: \
             { "Dummy" :
                   [{
                         "item_name": "bar",
@@ -633,6 +1155,66 @@ class TestStatsHttpd(unittest.TestCase):
                         "item_default": "foo",
                         "item_description": "bar is foo",
                         "item_title": "bar"
+                        },
+                   {
+                        "item_name": "boo_time",
+                        "item_type": "string",
+                        "item_optional": False,
+                        "item_default": "2012-02-02T02:02:02Z",
+                        "item_description": "boo time",
+                        "item_title": "Boo Time",
+                        "item_format": "date-time"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
                         }]
               }
         xsd_body2 = self.stats_httpd.xsd_handler()
@@ -640,30 +1222,77 @@ class TestStatsHttpd(unittest.TestCase):
 
     def test_xsl_handler(self):
         self.stats_httpd = MyStatsHttpd(get_availaddr())
-        self.stats_httpd.get_stats_spec = lambda: \
+        self.stats_httpd.get_stats_spec = lambda x,y: \
             { "Dummy" :
                   [{
                         "item_name": "foo",
                         "item_type": "string",
                         "item_optional": False,
                         "item_default": "bar",
-                        "item_description": "foo is bar",
+                        "item_description": "foo bar",
                         "item_title": "Foo"
+                        },
+                   {
+                        "item_name": "foo2",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Foo bar",
+                        "item_description": "Foo bar",
+                        "list_item_spec": {
+                            "item_name": "foo2-1",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "foo2-1-1",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Foo2 1 1",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-2",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 2",
+                                    "item_description": "Foo bar"
+                                    },
+                                {
+                                    "item_name": "foo2-1-3",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Foo2 1 3",
+                                    "item_description": "Foo bar"
+                                    }
+                                ]
+                            }
                         }]
               }
         xsl_body1 = self.stats_httpd.open_template(
             stats_httpd.XSL_TEMPLATE_LOCATION).substitute(
-            xsl_string='<xsl:template match="*"><tr>' \
-                + '<td>Dummy</td>' \
-                + '<td class="title" title="foo is bar">Foo</td>' \
-                + '<td><xsl:value-of select="Dummy/foo" /></td>' \
-                + '</tr></xsl:template>',
+            xsl_string='<xsl:template match="bind10:statistics"><table><tr><th>Module Name</th><th>Module Item</th></tr><xsl:for-each select="Dummy"><tr><td><a href="' + stats_httpd.XML_URL_PATH + '/Dummy">Dummy</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo">Foo</a></td><td><xsl:value-of select="foo" /></td></tr><xsl:for-each select="foo2"><tr><td class="title" title="Foo bar"><a href="' + stats_httpd.XML_URL_PATH + '/Dummy/foo2">Foo bar</a></td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><xsl:for-each select="foo2-1"><tr><td class="title" title="">foo2-1</td><td><table><tr><th>Item Name</th><th>Item Value</th></tr><tr><td class="title" title="Foo bar">Foo2 1 1</td><td><xsl:value-of select="foo2-1-1" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 2</td><td><xsl:value-of select="foo2-1-2" /></td></tr><tr><td class="title" title="Foo bar">Foo2 1 3
 </td><td><xsl:value-of select="foo2-1-3" /></td></tr></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></td></tr></xsl:for-each></table></xsl:template>',
             xsd_namespace=stats_httpd.XSD_NAMESPACE)
         xsl_body2 = self.stats_httpd.xsl_handler()
         self.assertEqual(type(xsl_body1), str)
         self.assertEqual(type(xsl_body2), str)
         self.assertEqual(xsl_body1, xsl_body2)
-        self.stats_httpd.get_stats_spec = lambda: \
+        self.stats_httpd.get_stats_spec = lambda x,y: \
             { "Dummy" :
                   [{
                         "item_name": "bar",
diff --git a/src/bin/stats/tests/b10-stats_test.py b/src/bin/stats/tests/b10-stats_test.py
index 3813c7e..3c8599a 100644
--- a/src/bin/stats/tests/b10-stats_test.py
+++ b/src/bin/stats/tests/b10-stats_test.py
@@ -226,7 +226,7 @@ class TestStats(unittest.TestCase):
                 'show', 'Stats',
                 params={ 'owner' : 'Boss',
                   'name'  : 'boot_time' }),
-            (0, self.const_datetime))
+            (0, {'Boss': {'boot_time': self.const_datetime}}))
         self.assertEqual(
             send_command(
                 'set', 'Stats',
@@ -238,7 +238,7 @@ class TestStats(unittest.TestCase):
                 'show', 'Stats',
                 params={ 'owner' : 'Boss',
                   'name'  : 'boot_time' }),
-            (0, self.const_datetime))
+            (0, {'Boss': {'boot_time': self.const_datetime}}))
         self.assertEqual(
             send_command('status', 'Stats'),
             (0, "Stats is up. (PID " + str(os.getpid()) + ")"))
@@ -321,25 +321,25 @@ class TestStats(unittest.TestCase):
         my_statistics_data = self.stats.get_statistics_data()
         self.assertTrue('Stats' in my_statistics_data)
         self.assertTrue('Boss' in my_statistics_data)
+        self.assertTrue('boot_time' in my_statistics_data['Boss'])
         my_statistics_data = self.stats.get_statistics_data(owner='Stats')
-        self.assertTrue('report_time' in my_statistics_data)
-        self.assertTrue('boot_time' in my_statistics_data)
-        self.assertTrue('last_update_time' in my_statistics_data)
-        self.assertTrue('timestamp' in my_statistics_data)
-        self.assertTrue('lname' in my_statistics_data)
+        self.assertTrue('Stats' in my_statistics_data)
+        self.assertTrue('report_time' in my_statistics_data['Stats'])
+        self.assertTrue('boot_time' in my_statistics_data['Stats'])
+        self.assertTrue('last_update_time' in my_statistics_data['Stats'])
+        self.assertTrue('timestamp' in my_statistics_data['Stats'])
+        self.assertTrue('lname' in my_statistics_data['Stats'])
         self.assertRaises(stats.StatsError, self.stats.get_statistics_data, owner='Foo')
-        my_statistics_data = self.stats.get_statistics_data(owner='Stats')
-        self.assertTrue('boot_time' in my_statistics_data)
         my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='report_time')
-        self.assertEqual(my_statistics_data, self.const_default_datetime)
+        self.assertEqual(my_statistics_data['Stats']['report_time'], self.const_default_datetime)
         my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='boot_time')
-        self.assertEqual(my_statistics_data, self.const_default_datetime)
+        self.assertEqual(my_statistics_data['Stats']['boot_time'], self.const_default_datetime)
         my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='last_update_time')
-        self.assertEqual(my_statistics_data, self.const_default_datetime)
+        self.assertEqual(my_statistics_data['Stats']['last_update_time'], self.const_default_datetime)
         my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='timestamp')
-        self.assertEqual(my_statistics_data, 0.0)
+        self.assertEqual(my_statistics_data['Stats']['timestamp'], 0.0)
         my_statistics_data = self.stats.get_statistics_data(owner='Stats', name='lname')
-        self.assertEqual(my_statistics_data, '')
+        self.assertEqual(my_statistics_data, {'Stats': {'lname':''}})
         self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
                           owner='Stats', name='Bar')
         self.assertRaises(stats.StatsError, self.stats.get_statistics_data,
@@ -385,10 +385,25 @@ class TestStats(unittest.TestCase):
                 1, "specified arguments are incorrect: owner: Foo, name: bar"))
         self.assertEqual(self.stats.command_show(owner='Auth'),
                          isc.config.create_answer(
-                0, {'queries.tcp': 0, 'queries.udp': 0}))
+                0, {'Auth':{ 'queries.udp': 0,
+                     'queries.tcp': 0,
+                     'queries.perzone': [{ 'zonename': 'test1.example',
+                                           'queries.udp': 1,
+                                           'queries.tcp': 2 },
+                                         { 'zonename': 'test2.example',
+                                           'queries.udp': 3,
+                                           'queries.tcp': 4 }] }}))
         self.assertEqual(self.stats.command_show(owner='Auth', name='queries.udp'),
                          isc.config.create_answer(
-                0, 0))
+                0, {'Auth': {'queries.udp':0}}))
+        self.assertEqual(self.stats.command_show(owner='Auth', name='queries.perzone'),
+                         isc.config.create_answer(
+                0, {'Auth': {'queries.perzone': [{ 'zonename': 'test1.example',
+                      'queries.udp': 1,
+                      'queries.tcp': 2 },
+                    { 'zonename': 'test2.example',
+                      'queries.udp': 3,
+                      'queries.tcp': 4 }]}}))
         orig_get_timestamp = stats.get_timestamp
         orig_get_datetime = stats.get_datetime
         stats.get_timestamp = lambda : self.const_timestamp
@@ -396,7 +411,7 @@ class TestStats(unittest.TestCase):
         self.assertEqual(stats.get_timestamp(), self.const_timestamp)
         self.assertEqual(stats.get_datetime(), self.const_datetime)
         self.assertEqual(self.stats.command_show(owner='Stats', name='report_time'), \
-                             isc.config.create_answer(0, self.const_datetime))
+                             isc.config.create_answer(0, {'Stats': {'report_time':self.const_datetime}}))
         self.assertEqual(self.stats.statistics_data['Stats']['timestamp'], self.const_timestamp)
         self.assertEqual(self.stats.statistics_data['Stats']['boot_time'], self.const_default_datetime)
         stats.get_timestamp = orig_get_timestamp
@@ -442,9 +457,12 @@ class TestStats(unittest.TestCase):
             self.assertTrue('item_format' in item)
 
         schema = value['Auth']
-        self.assertEqual(len(schema), 2)
+        self.assertEqual(len(schema), 3)
         for item in schema:
-            self.assertTrue(len(item) == 6)
+            if item['item_type'] == 'list':
+                self.assertEqual(len(item), 7)
+            else:
+                self.assertEqual(len(item), 6)
             self.assertTrue('item_name' in item)
             self.assertTrue('item_type' in item)
             self.assertTrue('item_optional' in item)
@@ -455,10 +473,10 @@ class TestStats(unittest.TestCase):
         (rcode, value) = isc.config.ccsession.parse_answer(
             self.stats.command_showschema(owner='Stats'))
         self.assertEqual(rcode, 0)
-        self.assertFalse('Stats' in value)
+        self.assertTrue('Stats' in value)
         self.assertFalse('Boss' in value)
         self.assertFalse('Auth' in value)
-        for item in value:
+        for item in value['Stats']:
             self.assertTrue(len(item) == 6 or len(item) == 7)
             self.assertTrue('item_name' in item)
             self.assertTrue('item_type' in item)
@@ -472,19 +490,19 @@ class TestStats(unittest.TestCase):
         (rcode, value) = isc.config.ccsession.parse_answer(
             self.stats.command_showschema(owner='Stats', name='report_time'))
         self.assertEqual(rcode, 0)
-        self.assertFalse('Stats' in value)
+        self.assertTrue('Stats' in value)
         self.assertFalse('Boss' in value)
         self.assertFalse('Auth' in value)
-        self.assertTrue(len(value) == 7)
-        self.assertTrue('item_name' in value)
-        self.assertTrue('item_type' in value)
-        self.assertTrue('item_optional' in value)
-        self.assertTrue('item_default' in value)
-        self.assertTrue('item_title' in value)
-        self.assertTrue('item_description' in value)
-        self.assertTrue('item_format' in value)
-        self.assertEqual(value['item_name'], 'report_time')
-        self.assertEqual(value['item_format'], 'date-time')
+        self.assertEqual(len(value['Stats'][0]), 7)
+        self.assertTrue('item_name' in value['Stats'][0])
+        self.assertTrue('item_type' in value['Stats'][0])
+        self.assertTrue('item_optional' in value['Stats'][0])
+        self.assertTrue('item_default' in value['Stats'][0])
+        self.assertTrue('item_title' in value['Stats'][0])
+        self.assertTrue('item_description' in value['Stats'][0])
+        self.assertTrue('item_format' in value['Stats'][0])
+        self.assertEqual(value['Stats'][0]['item_name'], 'report_time')
+        self.assertEqual(value['Stats'][0]['item_format'], 'date-time')
 
         self.assertEqual(self.stats.command_showschema(owner='Foo'),
                          isc.config.create_answer(
@@ -494,7 +512,7 @@ class TestStats(unittest.TestCase):
                 1, "specified arguments are incorrect: owner: Foo, name: bar"))
         self.assertEqual(self.stats.command_showschema(owner='Auth'),
                          isc.config.create_answer(
-                0, [{
+                0, {'Auth': [{
                         "item_default": 0,
                         "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
                         "item_name": "queries.tcp",
@@ -509,17 +527,121 @@ class TestStats(unittest.TestCase):
                         "item_optional": False,
                         "item_title": "Queries UDP",
                         "item_type": "integer"
-                        }]))
+                        },
+                    {
+                        "item_name": "queries.perzone",
+                        "item_type": "list",
+                        "item_optional": False,
+                        "item_default": [
+                            {
+                                "zonename" : "test1.example",
+                                "queries.udp" : 1,
+                                "queries.tcp" : 2
+                                },
+                            {
+                                "zonename" : "test2.example",
+                                "queries.udp" : 3,
+                                "queries.tcp" : 4
+                                }
+                        ],
+                        "item_title": "Queries per zone",
+                        "item_description": "Queries per zone",
+                        "list_item_spec": {
+                            "item_name": "zones",
+                            "item_type": "map",
+                            "item_optional": False,
+                            "item_default": {},
+                            "map_item_spec": [
+                                {
+                                    "item_name": "zonename",
+                                    "item_type": "string",
+                                    "item_optional": False,
+                                    "item_default": "",
+                                    "item_title": "Zonename",
+                                    "item_description": "Zonename"
+                                    },
+                                {
+                                    "item_name": "queries.udp",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Queries UDP per zone",
+                                    "item_description": "A number of UDP query counts per zone"
+                                    },
+                                {
+                                    "item_name": "queries.tcp",
+                                    "item_type": "integer",
+                                    "item_optional": False,
+                                    "item_default": 0,
+                                    "item_title": "Queries TCP per zone",
+                                    "item_description": "A number of TCP query counts per zone"
+                                    }
+                                ]
+                            }
+                        }]}))
         self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.tcp'),
                          isc.config.create_answer(
-                0, {
+                0, {'Auth': [{
                     "item_default": 0,
                     "item_description": "A number of total query counts which all auth servers receive over TCP since they started initially",
                     "item_name": "queries.tcp",
                     "item_optional": False,
                     "item_title": "Queries TCP",
                     "item_type": "integer"
-                    }))
+                    }]}))
+        self.assertEqual(self.stats.command_showschema(owner='Auth', name='queries.perzone'),
+                         isc.config.create_answer(
+                0, {'Auth':[{
+                    "item_name": "queries.perzone",
+                    "item_type": "list",
+                    "item_optional": False,
+                    "item_default": [
+                        {
+                            "zonename" : "test1.example",
+                            "queries.udp" : 1,
+                            "queries.tcp" : 2
+                            },
+                        {
+                            "zonename" : "test2.example",
+                            "queries.udp" : 3,
+                            "queries.tcp" : 4
+                            }
+                    ],
+                    "item_title": "Queries per zone",
+                    "item_description": "Queries per zone",
+                    "list_item_spec": {
+                        "item_name": "zones",
+                        "item_type": "map",
+                        "item_optional": False,
+                        "item_default": {},
+                        "map_item_spec": [
+                            {
+                                "item_name": "zonename",
+                                "item_type": "string",
+                                "item_optional": False,
+                                "item_default": "",
+                                "item_title": "Zonename",
+                                "item_description": "Zonename"
+                                },
+                            {
+                                "item_name": "queries.udp",
+                                "item_type": "integer",
+                                "item_optional": False,
+                                "item_default": 0,
+                                "item_title": "Queries UDP per zone",
+                                "item_description": "A number of UDP query counts per zone"
+                                },
+                            {
+                                "item_name": "queries.tcp",
+                                "item_type": "integer",
+                                "item_optional": False,
+                                "item_default": 0,
+                                "item_title": "Queries TCP per zone",
+                                "item_description": "A number of TCP query counts per zone"
+                                }
+                            ]
+                        }
+                    }]}))
 
         self.assertEqual(self.stats.command_showschema(owner='Stats', name='bar'),
                          isc.config.create_answer(
diff --git a/src/bin/stats/tests/test_utils.py b/src/bin/stats/tests/test_utils.py
index 5eb8f92..3f6ff33 100644
--- a/src/bin/stats/tests/test_utils.py
+++ b/src/bin/stats/tests/test_utils.py
@@ -232,6 +232,57 @@ class MockAuth:
         "item_default": 0,
         "item_title": "Queries UDP",
         "item_description": "A number of total query counts which all auth servers receive over UDP since they started initially"
+      },
+      {
+        "item_name": "queries.perzone",
+        "item_type": "list",
+        "item_optional": false,
+        "item_default": [
+          {
+            "zonename" : "test1.example",
+            "queries.udp" : 1,
+            "queries.tcp" : 2
+          },
+          {
+            "zonename" : "test2.example",
+            "queries.udp" : 3,
+            "queries.tcp" : 4
+          }
+        ],
+        "item_title": "Queries per zone",
+        "item_description": "Queries per zone",
+        "list_item_spec": {
+          "item_name": "zones",
+          "item_type": "map",
+          "item_optional": false,
+          "item_default": {},
+          "map_item_spec": [
+            {
+              "item_name": "zonename",
+              "item_type": "string",
+              "item_optional": false,
+              "item_default": "",
+              "item_title": "Zonename",
+              "item_description": "Zonename"
+            },
+            {
+              "item_name": "queries.udp",
+              "item_type": "integer",
+              "item_optional": false,
+              "item_default": 0,
+              "item_title": "Queries UDP per zone",
+              "item_description": "A number of UDP query counts per zone"
+            },
+            {
+              "item_name": "queries.tcp",
+              "item_type": "integer",
+              "item_optional": false,
+              "item_default": 0,
+              "item_title": "Queries TCP per zone",
+              "item_description": "A number of TCP query counts per zone"
+            }
+          ]
+        }
       }
     ]
   }
@@ -251,6 +302,11 @@ class MockAuth:
         self.got_command_name = ''
         self.queries_tcp = 3
         self.queries_udp = 2
+        self.queries_per_zone = [{
+                'zonename': 'test1.example',
+                'queries.tcp': 5,
+                'queries.udp': 4
+                }]
 
     def run(self):
         self.mccs.start()
@@ -273,7 +329,8 @@ class MockAuth:
         if command == 'sendstats':
             params = { "owner": "Auth",
                        "data": { 'queries.tcp': self.queries_tcp,
-                                 'queries.udp': self.queries_udp } }
+                                 'queries.udp': self.queries_udp,
+                                 'queries.per-zone' : self.queries_per_zone } }
             return send_command("set", "Stats", params=params, session=self.cc_session)
         return isc.config.create_answer(1, "Unknown Command")
 
diff --git a/src/bin/xfrin/tests/Makefile.am b/src/bin/xfrin/tests/Makefile.am
index 8f4fa91..cffafe1 100644
--- a/src/bin/xfrin/tests/Makefile.am
+++ b/src/bin/xfrin/tests/Makefile.am
@@ -10,7 +10,7 @@ LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
 LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
 else
-# sunstudio needs the ds path even if not all paths are necessary
+# Some systems need the ds path even if not all paths are necessary
 LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
diff --git a/src/bin/xfrout/tests/Makefile.am b/src/bin/xfrout/tests/Makefile.am
index ace8fc9..509df79 100644
--- a/src/bin/xfrout/tests/Makefile.am
+++ b/src/bin/xfrout/tests/Makefile.am
@@ -2,11 +2,18 @@ PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = xfrout_test.py
 noinst_SCRIPTS = $(PYTESTS)
 
+EXTRA_DIST = testdata/test.sqlite3
+# This one is actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com
+
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
 LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/util/io/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$(abs_top_builddir)/src/lib/acl/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -24,5 +31,6 @@ endif
 	B10_FROM_BUILD=$(abs_top_builddir) \
 	$(LIBRARY_PATH_PLACEHOLDER) \
 	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/bin/xfrout:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/util/io/.libs \
+	TESTDATASRCDIR=$(abs_srcdir)/testdata/ \
 	$(PYCOVERAGE_RUN) $(abs_builddir)/$$pytest || exit ; \
 	done
diff --git a/src/bin/xfrout/tests/testdata/example.com b/src/bin/xfrout/tests/testdata/example.com
new file mode 100644
index 0000000..25c5e6a
--- /dev/null
+++ b/src/bin/xfrout/tests/testdata/example.com
@@ -0,0 +1,6 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.com.         3600  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com.         3600  IN  NS  a.dns.example.com.
+a.dns.example.com.   3600  IN  A    192.0.2.1
+a.dns.example.com.   7200  IN  A    192.0.2.2
diff --git a/src/bin/xfrout/tests/testdata/test.sqlite3 b/src/bin/xfrout/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..af491f5
Binary files /dev/null and b/src/bin/xfrout/tests/testdata/test.sqlite3 differ
diff --git a/src/bin/xfrout/tests/xfrout_test.py.in b/src/bin/xfrout/tests/xfrout_test.py.in
index 0a9fd3c..8394b0a 100644
--- a/src/bin/xfrout/tests/xfrout_test.py.in
+++ b/src/bin/xfrout/tests/xfrout_test.py.in
@@ -21,12 +21,13 @@ import os
 from isc.testutils.tsigctx_mock import MockTSIGContext
 from isc.cc.session import *
 import isc.config
-from pydnspp import *
+from isc.dns import *
 from xfrout import *
 import xfrout
 import isc.log
 import isc.acl.dns
 
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 TSIG_KEY = TSIGKey("example.com:SFuWd/q99SzF8Yzd1QbB9g==")
 
 # our fake socket, where we can read and insert messages
@@ -55,19 +56,64 @@ class MySocket():
         self.sendqueue = self.sendqueue[size:]
         return result
 
-    def read_msg(self):
+    def read_msg(self, parse_options=Message.PARSE_DEFAULT):
         sent_data = self.readsent()
         get_msg = Message(Message.PARSE)
-        get_msg.from_wire(bytes(sent_data[2:]))
+        get_msg.from_wire(bytes(sent_data[2:]), parse_options)
         return get_msg
 
     def clear_send(self):
         del self.sendqueue[:]
 
-# We subclass the Session class we're testing here, only
-# to override the handle() and _send_data() method
+class MockDataSrcClient:
+    def __init__(self, type, config):
+        pass
+
+    def get_iterator(self, zone_name, adjust_ttl=False):
+        if zone_name == Name('notauth.example.com'):
+            raise isc.datasrc.Error('no such zone')
+        self._zone_name = zone_name
+        return self
+
+    def get_soa(self):  # emulate ZoneIterator.get_soa()
+        if self._zone_name == Name('nosoa.example.com'):
+            return None
+        soa_rrset = RRset(self._zone_name, RRClass.IN(), RRType.SOA(),
+                          RRTTL(3600))
+        soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                                  'master.example.com. ' +
+                                  'admin.example.com. 1234 ' +
+                                  '3600 1800 2419200 7200'))
+        if self._zone_name == Name('multisoa.example.com'):
+            soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                                      'master.example.com. ' +
+                                      'admin.example.com. 1300 ' +
+                                      '3600 1800 2419200 7200'))
+        return soa_rrset
+
+class MyCCSession(isc.config.ConfigData):
+    def __init__(self):
+        module_spec = isc.config.module_spec_from_file(
+            xfrout.SPECFILE_LOCATION)
+        ConfigData.__init__(self, module_spec)
+
+    def get_remote_config_value(self, module_name, identifier):
+        if module_name == "Auth" and identifier == "database_file":
+            return "initdb.file", False
+        else:
+            return "unknown", False
+
+# This constant dictionary stores all default configuration parameters
+# defined in the xfrout spec file.
+DEFAULT_CONFIG = MyCCSession().get_full_config()
+
+# We subclass the Session class we're testing here, only overriding a few
+# methods
 class MyXfroutSession(XfroutSession):
-    def handle(self):
+    def _handle(self):
+        pass
+
+    def _close_socket(self):
         pass
 
     def _send_data(self, sock, data):
@@ -80,12 +126,23 @@ class MyXfroutSession(XfroutSession):
 class Dbserver:
     def __init__(self):
         self._shutdown_event = threading.Event()
+        self.transfer_counter = 0
+        self._max_transfers_out = DEFAULT_CONFIG['transfers_out']
     def get_db_file(self):
-        return None
+        return 'test.sqlite3'
+    def increase_transfers_counter(self):
+        self.transfer_counter += 1
+        return True
     def decrease_transfers_counter(self):
-        pass
+        self.transfer_counter -= 1
+
+class TestXfroutSessionBase(unittest.TestCase):
+    '''Base class for tests related to xfrout sessions
+
+    This class defines common setup/teardown and utility methods.  Actual
+    tests are delegated to subclasses.
 
-class TestXfroutSession(unittest.TestCase):
+    '''
     def getmsg(self):
         msg = Message(Message.PARSE)
         msg.from_wire(self.mdata)
@@ -102,15 +159,15 @@ class TestXfroutSession(unittest.TestCase):
     def message_has_tsig(self, msg):
         return msg.get_tsig_record() is not None
 
-    def create_request_data(self, with_tsig=False):
+    def create_request_data(self, with_question=True, with_tsig=False):
         msg = Message(Message.RENDER)
         query_id = 0x1035
         msg.set_qid(query_id)
         msg.set_opcode(Opcode.QUERY())
         msg.set_rcode(Rcode.NOERROR())
-        query_question = Question(Name("example.com"), RRClass.IN(),
-                                  RRType.AXFR())
-        msg.add_question(query_question)
+        if with_question:
+            msg.add_question(Question(Name("example.com"), RRClass.IN(),
+                                      RRType.AXFR()))
 
         renderer = MessageRenderer()
         if with_tsig:
@@ -124,20 +181,76 @@ class TestXfroutSession(unittest.TestCase):
     def setUp(self):
         self.sock = MySocket(socket.AF_INET,socket.SOCK_STREAM)
         self.xfrsess = MyXfroutSession(self.sock, None, Dbserver(),
-                                       TSIGKeyRing(), ('127.0.0.1', 12345),
+                                       TSIGKeyRing(),
+                                       (socket.AF_INET, socket.SOCK_STREAM,
+                                        ('127.0.0.1', 12345)),
                                        # When not testing ACLs, simply accept
                                        isc.acl.dns.REQUEST_LOADER.load(
                                            [{"action": "ACCEPT"}]),
                                        {})
-        self.mdata = self.create_request_data(False)
-        self.soa_record = (4, 3, 'example.com.', 'com.example.', 3600, 'SOA', None, 'master.example.com. admin.example.com. 1234 3600 1800 2419200 7200')
+        self.mdata = self.create_request_data()
+        self.soa_rrset = RRset(Name('example.com'), RRClass.IN(), RRType.SOA(),
+                               RRTTL(3600))
+        self.soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                                       'master.Example.com. ' +
+                                       'admin.exAmple.com. ' +
+                                       '1234 3600 1800 2419200 7200'))
+        # some test replaces a module-wide function.  We should ensure the
+        # original is used elsewhere.
+        self.orig_get_rrset_len = xfrout.get_rrset_len
+
+    def tearDown(self):
+        xfrout.get_rrset_len = self.orig_get_rrset_len
+        # transfer_counter must always be reset no matter what happens within
+        # the XfroutSession object.  We check the condition here.
+        self.assertEqual(0, self.xfrsess._server.transfer_counter)
+
+class TestXfroutSession(TestXfroutSessionBase):
+    def test_quota_error(self):
+        '''Emulating the server being too busy.
+
+        '''
+        self.xfrsess._request_data = self.mdata
+        self.xfrsess._server.increase_transfers_counter = lambda : False
+        XfroutSession._handle(self.xfrsess)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.REFUSED())
+
+    def test_quota_ok(self):
+        '''The default case in terms of the xfrout quota.
+
+        '''
+        # set up a bogus request, which should result in FORMERR. (it only
+        # has to be something that is different from the previous case)
+        self.xfrsess._request_data = \
+            self.create_request_data(with_question=False)
+        # Replace the data source client to avoid datasrc related exceptions
+        self.xfrsess.ClientClass = MockDataSrcClient
+        XfroutSession._handle(self.xfrsess)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.FORMERR())
+
+    def test_exception_from_session(self):
+        '''Test the case where the main processing raises an exception.
+
+        We just check it doesn't cause any unexpected disruption and (in tearDown)
+        transfer_counter is correctly reset to 0.
+
+        '''
+        def dns_xfrout_start(fd, msg, quota):
+            raise ValueError('fake exception')
+        self.xfrsess.dns_xfrout_start = dns_xfrout_start
+        XfroutSession._handle(self.xfrsess)
 
     def test_parse_query_message(self):
         [get_rcode, get_msg] = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(get_rcode.to_text(), "NOERROR")
 
+        # Broken request: no question
+        request_data = self.create_request_data(with_question=False)
+        rcode, msg = self.xfrsess._parse_query_message(request_data)
+        self.assertEqual(Rcode.FORMERR(), rcode)
+
         # tsig signed query message
-        request_data = self.create_request_data(True)
+        request_data = self.create_request_data(with_tsig=True)
         # BADKEY
         [rcode, msg] = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -165,20 +278,23 @@ class TestXfroutSession(unittest.TestCase):
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "NOERROR")
         # This should be dropped completely, therefore returning None
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(None, rcode)
         # This should be refused, therefore REFUSED
-        self.xfrsess._remote = ('192.0.2.2', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
         rcode, msg = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "REFUSED")
 
         # TSIG signed request
-        request_data = self.create_request_data(True)
+        request_data = self.create_request_data(with_tsig=True)
 
         # If the TSIG check fails, it should not check ACL
         # (If it checked ACL as well, it would just drop the request)
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         self.xfrsess._tsig_key_ring = TSIGKeyRing()
         rcode, msg = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOTAUTH")
@@ -216,19 +332,23 @@ class TestXfroutSession(unittest.TestCase):
                 {"action": "REJECT"}
         ]))
         # both matches
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         [rcode, msg] = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "NOERROR")
         # TSIG matches, but address doesn't
-        self.xfrsess._remote = ('192.0.2.2', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
         [rcode, msg] = self.xfrsess._parse_query_message(request_data)
         self.assertEqual(rcode.to_text(), "REFUSED")
         # Address matches, but TSIG doesn't (not included)
-        self.xfrsess._remote = ('192.0.2.1', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.1', 12345))
         [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "REFUSED")
         # Neither address nor TSIG matches
-        self.xfrsess._remote = ('192.0.2.2', 12345)
+        self.xfrsess._remote = (socket.AF_INET, socket.SOCK_STREAM,
+                                ('192.0.2.2', 12345))
         [rcode, msg] = self.xfrsess._parse_query_message(self.mdata)
         self.assertEqual(rcode.to_text(), "REFUSED")
 
@@ -289,10 +409,6 @@ class TestXfroutSession(unittest.TestCase):
                          self.xfrsess._get_transfer_acl(Name('EXAMPLE.COM'),
                                                         RRClass.IN()))
 
-    def test_get_query_zone_name(self):
-        msg = self.getmsg()
-        self.assertEqual(self.xfrsess._get_query_zone_name(msg), "example.com.")
-
     def test_send_data(self):
         self.xfrsess._send_data(self.sock, self.mdata)
         senddata = self.sock.readsent()
@@ -315,10 +431,13 @@ class TestXfroutSession(unittest.TestCase):
     def test_send_message(self):
         msg = self.getmsg()
         msg.make_response()
-        # soa record data with different cases
-        soa_record = (4, 3, 'Example.com.', 'com.Example.', 3600, 'SOA', None, 'master.Example.com. admin.exAmple.com. 1234 3600 1800 2419200 7200')
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(soa_record)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        # SOA record data with different cases
+        soa_rrset = RRset(Name('Example.com.'), RRClass.IN(), RRType.SOA(),
+                               RRTTL(3600))
+        soa_rrset.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                                  'master.Example.com. admin.exAmple.com. ' +
+                                  '1234 3600 1800 2419200 7200'))
+        msg.add_rrset(Message.SECTION_ANSWER, soa_rrset)
         self.xfrsess._send_message(self.sock, msg)
         send_out_data = self.sock.readsent()[2:]
 
@@ -347,23 +466,15 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(msg.get_rcode(), rcode)
         self.assertTrue(msg.get_header_flag(Message.HEADERFLAG_AA))
 
-    def test_create_rrset_from_db_record(self):
-        rrset = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-        self.assertEqual(rrset.get_name().to_text(), "example.com.")
-        self.assertEqual(rrset.get_class(), RRClass("IN"))
-        self.assertEqual(rrset.get_type().to_text(), "SOA")
-        rdata = rrset.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
-
     def test_send_message_with_last_soa(self):
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
 
         # packet number less than TSIG_SIGN_EVERY_NTH
         packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, packet_neet_not_sign)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0,
+                                                 packet_neet_not_sign)
         get_msg = self.sock.read_msg()
         # tsig context is not exist
         self.assertFalse(self.message_has_tsig(get_msg))
@@ -378,12 +489,13 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+        self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
         # msg is the TSIG_SIGN_EVERY_NTH one
         # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, TSIG_SIGN_EVERY_NTH)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0,
+                                                 TSIG_SIGN_EVERY_NTH)
         get_msg = self.sock.read_msg()
         # tsig context is not exist
         self.assertFalse(self.message_has_tsig(get_msg))
@@ -392,7 +504,6 @@ class TestXfroutSession(unittest.TestCase):
         # create tsig context
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
 
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
 
@@ -400,8 +511,9 @@ class TestXfroutSession(unittest.TestCase):
         packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
         # msg is not the TSIG_SIGN_EVERY_NTH one
         # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, packet_neet_not_sign)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0,
+                                                 packet_neet_not_sign)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
 
@@ -411,22 +523,23 @@ class TestXfroutSession(unittest.TestCase):
 
         # msg is the TSIG_SIGN_EVERY_NTH one
         # sending the message with last soa together
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa,
-                                                 0, TSIG_SIGN_EVERY_NTH)
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset, 0,
+                                                 TSIG_SIGN_EVERY_NTH)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
 
     def test_trigger_send_message_with_last_soa(self):
         rrset_a = RRset(Name("example.com"), RRClass.IN(), RRType.A(), RRTTL(3600))
         rrset_a.add_rdata(Rdata(RRType.A(), RRClass.IN(), "192.0.2.1"))
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
 
         msg = self.getmsg()
         msg.make_response()
         msg.add_rrset(Message.SECTION_ANSWER, rrset_a)
 
         # length larger than MAX-len(rrset)
-        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+            get_rrset_len(self.soa_rrset) + 1
         # packet number less than TSIG_SIGN_EVERY_NTH
         packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
@@ -434,7 +547,9 @@ class TestXfroutSession(unittest.TestCase):
         # this should have triggered the sending of two messages
         # (1 with the rrset we added manually, and 1 that triggered
         # the sending in _with_last_soa)
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset,
+                                                 length_need_split,
                                                  packet_neet_not_sign)
         get_msg = self.sock.read_msg()
         self.assertFalse(self.message_has_tsig(get_msg))
@@ -461,20 +576,20 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(answer.get_class(), RRClass("IN"))
         self.assertEqual(answer.get_type().to_text(), "SOA")
         rdata = answer.get_rdata()
-        self.assertEqual(rdata[0].to_text(), self.soa_record[7])
+        self.assertEqual(rdata[0], self.soa_rrset.get_rdata()[0])
 
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_trigger_send_message_with_last_soa_with_tsig(self):
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
         msg = self.getmsg()
         msg.make_response()
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        msg.add_rrset(Message.SECTION_ANSWER, self.soa_rrset)
 
         # length larger than MAX-len(rrset)
-        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - get_rrset_len(rrset_soa) + 1
+        length_need_split = xfrout.XFROUT_MAX_MESSAGE_SIZE - \
+            get_rrset_len(self.soa_rrset) + 1
         # packet number less than TSIG_SIGN_EVERY_NTH
         packet_neet_not_sign = xfrout.TSIG_SIGN_EVERY_NTH - 1
 
@@ -482,7 +597,9 @@ class TestXfroutSession(unittest.TestCase):
         # this should have triggered the sending of two messages
         # (1 with the rrset we added manually, and 1 that triggered
         # the sending in _with_last_soa)
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset,
+                                                 length_need_split,
                                                  packet_neet_not_sign)
         get_msg = self.sock.read_msg()
         # msg is not the TSIG_SIGN_EVERY_NTH one, it shouldn't be tsig signed
@@ -495,7 +612,9 @@ class TestXfroutSession(unittest.TestCase):
 
 
         # msg is the TSIG_SIGN_EVERY_NTH one, it should be tsig signed
-        self.xfrsess._send_message_with_last_soa(msg, self.sock, rrset_soa, length_need_split,
+        self.xfrsess._send_message_with_last_soa(msg, self.sock,
+                                                 self.soa_rrset,
+                                                 length_need_split,
                                                  xfrout.TSIG_SIGN_EVERY_NTH)
         get_msg = self.sock.read_msg()
         self.assertTrue(self.message_has_tsig(get_msg))
@@ -506,49 +625,18 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(0, len(self.sock.sendqueue))
 
     def test_get_rrset_len(self):
-        rrset_soa = self.xfrsess._create_rrset_from_db_record(self.soa_record)
-        self.assertEqual(82, get_rrset_len(rrset_soa))
-
-    def test_zone_has_soa(self):
-        global sqlite3_ds
-        def mydb1(zone, file):
-            return True
-        sqlite3_ds.get_zone_soa = mydb1
-        self.assertTrue(self.xfrsess._zone_has_soa(""))
-        def mydb2(zone, file):
-            return False
-        sqlite3_ds.get_zone_soa = mydb2
-        self.assertFalse(self.xfrsess._zone_has_soa(""))
-
-    def test_zone_exist(self):
-        global sqlite3_ds
-        def zone_exist(zone, file):
-            return zone
-        sqlite3_ds.zone_exist = zone_exist
-        self.assertTrue(self.xfrsess._zone_exist(True))
-        self.assertFalse(self.xfrsess._zone_exist(False))
+        self.assertEqual(82, get_rrset_len(self.soa_rrset))
 
     def test_check_xfrout_available(self):
-        def zone_exist(zone):
-            return zone
-        def zone_has_soa(zone):
-            return (not zone)
-        self.xfrsess._zone_exist = zone_exist
-        self.xfrsess._zone_has_soa = zone_has_soa
-        self.assertEqual(self.xfrsess._check_xfrout_available(False).to_text(), "NOTAUTH")
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "SERVFAIL")
-
-        def zone_empty(zone):
-            return zone
-        self.xfrsess._zone_has_soa = zone_empty
-        def false_func():
-            return False
-        self.xfrsess._server.increase_transfers_counter = false_func
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "REFUSED")
-        def true_func():
-            return True
-        self.xfrsess._server.increase_transfers_counter = true_func
-        self.assertEqual(self.xfrsess._check_xfrout_available(True).to_text(), "NOERROR")
+        self.xfrsess.ClientClass = MockDataSrcClient
+        self.assertEqual(self.xfrsess._check_xfrout_available(
+                Name('example.com')), Rcode.NOERROR())
+        self.assertEqual(self.xfrsess._check_xfrout_available(
+                Name('notauth.example.com')), Rcode.NOTAUTH())
+        self.assertEqual(self.xfrsess._check_xfrout_available(
+                Name('nosoa.example.com')), Rcode.SERVFAIL())
+        self.assertEqual(self.xfrsess._check_xfrout_available(
+                Name('multisoa.example.com')), Rcode.SERVFAIL())
 
     def test_dns_xfrout_start_formerror(self):
         # formerror
@@ -560,7 +648,6 @@ class TestXfroutSession(unittest.TestCase):
         return "example.com"
 
     def test_dns_xfrout_start_notauth(self):
-        self.xfrsess._get_query_zone_name = self.default
         def notauth(formpara):
             return Rcode.NOTAUTH()
         self.xfrsess._check_xfrout_available = notauth
@@ -568,13 +655,19 @@ class TestXfroutSession(unittest.TestCase):
         get_msg = self.sock.read_msg()
         self.assertEqual(get_msg.get_rcode().to_text(), "NOTAUTH")
 
+    def test_dns_xfrout_start_datasrc_servfail(self):
+        def internal_raise(x, y):
+            raise isc.datasrc.Error('exception for the sake of test')
+        self.xfrsess.ClientClass = internal_raise
+        self.xfrsess.dns_xfrout_start(self.sock, self.mdata)
+        self.assertEqual(self.sock.read_msg().get_rcode(), Rcode.SERVFAIL())
+
     def test_dns_xfrout_start_noerror(self):
-        self.xfrsess._get_query_zone_name = self.default
         def noerror(form):
             return Rcode.NOERROR()
         self.xfrsess._check_xfrout_available = noerror
 
-        def myreply(msg, sock, zonename):
+        def myreply(msg, sock):
             self.sock.send(b"success")
 
         self.xfrsess._reply_xfrout_query = myreply
@@ -582,41 +675,27 @@ class TestXfroutSession(unittest.TestCase):
         self.assertEqual(self.sock.readsent(), b"success")
 
     def test_reply_xfrout_query_noerror(self):
-        global sqlite3_ds
-        def get_zone_soa(zonename, file):
-            return self.soa_record
-
-        def get_zone_datas(zone, file):
-            return [self.soa_record]
-
-        sqlite3_ds.get_zone_soa = get_zone_soa
-        sqlite3_ds.get_zone_datas = get_zone_datas
-        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+        self.xfrsess._soa = self.soa_rrset
+        self.xfrsess._iterator = [self.soa_rrset]
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
         reply_msg = self.sock.read_msg()
         self.assertEqual(reply_msg.get_rr_count(Message.SECTION_ANSWER), 2)
 
     def test_reply_xfrout_query_noerror_with_tsig(self):
-        rrset_data = (4, 3, 'a.example.com.', 'com.example.', 3600, 'A', None, '192.168.1.1')
-        global sqlite3_ds
+        rrset = RRset(Name('a.example.com'), RRClass.IN(), RRType.A(),
+                      RRTTL(3600))
+        rrset.add_rdata(Rdata(RRType.A(), RRClass.IN(), '192.0.2.1'))
         global xfrout
-        def get_zone_soa(zonename, file):
-            return self.soa_record
-
-        def get_zone_datas(zone, file):
-            zone_rrsets = []
-            for i in range(0, 100):
-                zone_rrsets.insert(i, rrset_data)
-            return zone_rrsets
 
         def get_rrset_len(rrset):
             return 65520
 
-        sqlite3_ds.get_zone_soa = get_zone_soa
-        sqlite3_ds.get_zone_datas = get_zone_datas
+        self.xfrsess._soa = self.soa_rrset
+        self.xfrsess._iterator = [rrset for i in range(0, 100)]
         xfrout.get_rrset_len = get_rrset_len
 
         self.xfrsess._tsig_ctx = self.create_mock_tsig_ctx(TSIGError.NOERROR)
-        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock, "example.com.")
+        self.xfrsess._reply_xfrout_query(self.getmsg(), self.sock)
 
         # tsig signed first package
         reply_msg = self.sock.read_msg()
@@ -640,18 +719,34 @@ class TestXfroutSession(unittest.TestCase):
         # and it should not have sent anything else
         self.assertEqual(0, len(self.sock.sendqueue))
 
-class MyCCSession(isc.config.ConfigData):
-    def __init__(self):
-        module_spec = isc.config.module_spec_from_file(
-            xfrout.SPECFILE_LOCATION)
-        ConfigData.__init__(self, module_spec)
 
-    def get_remote_config_value(self, module_name, identifier):
-        if module_name == "Auth" and identifier == "database_file":
-            return "initdb.file", False
-        else:
-            return "unknown", False
+class TestXfroutSessionWithSQLite3(TestXfroutSessionBase):
+    '''Tests for XFR-out sessions using an SQLite3 DB.
 
+    These are provided mainly to confirm the implementation actually works
+    in an environment closer to actual operational environments.  So we
+    only check a few common cases; other details are tested using mock
+    data sources.
+
+    '''
+    def setUp(self):
+        super().setUp()
+        self.xfrsess._request_data = self.mdata
+        self.xfrsess._server.get_db_file = lambda : TESTDATA_SRCDIR + \
+            'test.sqlite3'
+
+    def test_axfr_normal_session(self):
+        XfroutSession._handle(self.xfrsess)
+        response = self.sock.read_msg(Message.PRESERVE_ORDER);
+        self.assertEqual(Rcode.NOERROR(), response.get_rcode())
+        # This zone contains two A RRs for the same name with different TTLs.
+        # These TTLs should be preserved in the AXFR stream.
+        actual_ttls = []
+        for rr in response.get_section(Message.SECTION_ANSWER):
+            if rr.get_type() == RRType.A() and \
+                    not rr.get_ttl() in actual_ttls:
+                actual_ttls.append(rr.get_ttl().get_value())
+        self.assertEqual([3600, 7200], sorted(actual_ttls))
 
 class MyUnixSockServer(UnixSockServer):
     def __init__(self):
@@ -670,23 +765,27 @@ class TestUnixSockServer(unittest.TestCase):
            file descriptor. This is needed, because we get only that one
            from auth."""
         # We test with UDP, as it can be "connected" without other
-        # endpoint
+        # endpoint.  Note that in the current implementation _guess_remote()
+        # unconditionally returns SOCK_STREAM.
         sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
         sock.connect(('127.0.0.1', 12345))
-        self.assertEqual(('127.0.0.1', 12345),
+        self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+                          ('127.0.0.1', 12345)),
                          self.unix._guess_remote(sock.fileno()))
         if socket.has_ipv6:
             # Don't check IPv6 address on hosts not supporting them
             sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
             sock.connect(('::1', 12345))
-            self.assertEqual(('::1', 12345, 0, 0),
+            self.assertEqual((socket.AF_INET6, socket.SOCK_STREAM,
+                              ('::1', 12345, 0, 0)),
                              self.unix._guess_remote(sock.fileno()))
             # Try when pretending there's no IPv6 support
             # (No need to pretend when there's really no IPv6)
             xfrout.socket.has_ipv6 = False
             sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
             sock.connect(('127.0.0.1', 12345))
-            self.assertEqual(('127.0.0.1', 12345),
+            self.assertEqual((socket.AF_INET, socket.SOCK_STREAM,
+                              ('127.0.0.1', 12345)),
                              self.unix._guess_remote(sock.fileno()))
             # Return it back
             xfrout.socket.has_ipv6 = True
diff --git a/src/bin/xfrout/xfrout.py.in b/src/bin/xfrout/xfrout.py.in
index cf3b04f..32dabbf 100755
--- a/src/bin/xfrout/xfrout.py.in
+++ b/src/bin/xfrout/xfrout.py.in
@@ -22,7 +22,7 @@ import isc.cc
 import threading
 import struct
 import signal
-from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
 from socketserver import *
 import os
 from isc.config.ccsession import *
@@ -97,6 +97,38 @@ TSIG_SIGN_EVERY_NTH = 96
 
 XFROUT_MAX_MESSAGE_SIZE = 65535
 
+# borrowed from xfrin.py @ #1298.  We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+    """Helper function to format a zone name and class as a string of
+       the form '<name>/<class>'.
+       Parameters:
+       zone_name (isc.dns.Name) name to format
+       zone_class (isc.dns.RRClass) class to format
+    """
+    return zone_name.to_text() + '/' + str(zone_class)
+
+# borrowed from xfrin.py @ #1298.
+def format_addrinfo(addrinfo):
+    """Helper function to format the addrinfo as a string of the form
+       <addr>:<port> (for IPv4) or [<addr>]:port (for IPv6). For unix domain
+       sockets, and unknown address families, it returns a basic string
+       conversion of the third element of the passed tuple.
+       Parameters:
+       addrinfo: a 3-tuple consisting of address family, socket type, and,
+                 depending on the family, either a 2-tuple with the address
+                 and port, or a filename
+    """
+    try:
+        if addrinfo[0] == socket.AF_INET:
+            return str(addrinfo[2][0]) + ":" + str(addrinfo[2][1])
+        elif addrinfo[0] == socket.AF_INET6:
+            return "[" + str(addrinfo[2][0]) + "]:" + str(addrinfo[2][1])
+        else:
+            return str(addrinfo[2])
+    except IndexError:
+        raise TypeError("addrinfo argument to format_addrinfo() does not "
+                        "appear to be consisting of (family, socktype, (addr, port))")
+
 def get_rrset_len(rrset):
     """Returns the wire length of the given RRset"""
     bytes = bytearray()
@@ -106,7 +138,7 @@ def get_rrset_len(rrset):
 
 class XfroutSession():
     def __init__(self, sock_fd, request_data, server, tsig_key_ring, remote,
-                 default_acl, zone_config):
+                 default_acl, zone_config, client_class=DataSourceClient):
         self._sock_fd = sock_fd
         self._request_data = request_data
         self._server = server
@@ -114,23 +146,51 @@ class XfroutSession():
         self._tsig_ctx = None
         self._tsig_len = 0
         self._remote = remote
+        self._request_type = 'AXFR' # could be IXFR when we support it
         self._acl = default_acl
         self._zone_config = zone_config
-        self.handle()
+        self.ClientClass = client_class # parameterize this for testing
+        self._soa = None # will be set in _check_xfrout_available or in tests
+        self._handle()
 
     def create_tsig_ctx(self, tsig_record, tsig_key_ring):
         return TSIGContext(tsig_record.get_name(), tsig_record.get_rdata().get_algorithm(),
                            tsig_key_ring)
 
-    def handle(self):
-        ''' Handle a xfrout query, send xfrout response '''
+    def _handle(self):
+        ''' Handle a xfrout query, send xfrout response(s).
+
+        This is separated from the constructor so that we can override
+        it from tests.
+
+        '''
+        # Check the xfrout quota.  We do both increase/decrease in this
+        # method so it's clear we always release it once acuired.
+        quota_ok = self._server.increase_transfers_counter()
+        ex = None
         try:
-            self.dns_xfrout_start(self._sock_fd, self._request_data)
-            #TODO, avoid catching all exceptions
+            self.dns_xfrout_start(self._sock_fd, self._request_data, quota_ok)
         except Exception as e:
-            logger.error(XFROUT_HANDLE_QUERY_ERROR, e)
-            pass
+            # To avoid resource leak we need catch all possible exceptions
+            # We log it later to exclude the case where even logger raises
+            # an exception.
+            ex = e
+
+        # Release any critical resources
+        if quota_ok:
+            self._server.decrease_transfers_counter()
+        self._close_socket()
+
+        if ex is not None:
+            logger.error(XFROUT_HANDLE_QUERY_ERROR, ex)
 
+    def _close_socket(self):
+        '''Simply close the socket via the given FD.
+
+        This is a dedicated subroutine of handle() and is separated from it
+        for the convenience of tests.
+
+        '''
         os.close(self._sock_fd)
 
     def _check_request_tsig(self, msg, request_data):
@@ -157,23 +217,31 @@ class XfroutSession():
 
         # TSIG related checks
         rcode = self._check_request_tsig(msg, mdata)
-
-        if rcode == Rcode.NOERROR():
-            # ACL checks
-            zone_name = msg.get_question()[0].get_name()
-            zone_class = msg.get_question()[0].get_class()
-            acl = self._get_transfer_acl(zone_name, zone_class)
-            acl_result = acl.execute(
-                isc.acl.dns.RequestContext(self._remote,
-                                           msg.get_tsig_record()))
-            if acl_result == DROP:
-                logger.info(XFROUT_QUERY_DROPPED, zone_name, zone_class,
-                            self._remote[0], self._remote[1])
-                return None, None
-            elif acl_result == REJECT:
-                logger.info(XFROUT_QUERY_REJECTED, zone_name, zone_class,
-                            self._remote[0], self._remote[1])
-                return Rcode.REFUSED(), msg
+        if rcode != Rcode.NOERROR():
+            return rcode, msg
+
+        # Make sure the question is valid.  This should be ensured by
+        # the auth server, but since it's far from our xfrout itself,
+        # we check it by ourselves.
+        if msg.get_rr_count(Message.SECTION_QUESTION) != 1:
+            return Rcode.FORMERR(), msg
+
+        # ACL checks
+        zone_name = msg.get_question()[0].get_name()
+        zone_class = msg.get_question()[0].get_class()
+        acl = self._get_transfer_acl(zone_name, zone_class)
+        acl_result = acl.execute(
+            isc.acl.dns.RequestContext(self._remote[2], msg.get_tsig_record()))
+        if acl_result == DROP:
+            logger.info(XFROUT_QUERY_DROPPED, self._request_type,
+                        format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return None, None
+        elif acl_result == REJECT:
+            logger.info(XFROUT_QUERY_REJECTED, self._request_type,
+                        format_addrinfo(self._remote),
+                        format_zone_str(zone_name, zone_class))
+            return Rcode.REFUSED(), msg
 
         return rcode, msg
 
@@ -195,14 +263,6 @@ class XfroutSession():
             return self._zone_config[config_key]['transfer_acl']
         return self._acl
 
-    def _get_query_zone_name(self, msg):
-        question = msg.get_question()[0]
-        return question.get_name().to_text()
-
-    def _get_query_zone_class(self, msg):
-        question = msg.get_question()[0]
-        return question.get_class().to_text()
-
     def _send_data(self, sock_fd, data):
         size = len(data)
         total_count = 0
@@ -238,51 +298,49 @@ class XfroutSession():
         msg.set_rcode(rcode_)
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
-    def _zone_has_soa(self, zone):
-        '''Judge if the zone has an SOA record.'''
-        # In some sense, the SOA defines a zone.
-        # If the current name server has authority for the
-        # specific zone, we need to judge if the zone has an SOA record;
-        # if not, we consider the zone has incomplete data, so xfrout can't
-        # serve for it.
-        if sqlite3_ds.get_zone_soa(zone, self._server.get_db_file()):
-            return True
-
-        return False
-
-    def _zone_exist(self, zonename):
-        '''Judge if the zone is configured by config manager.'''
-        # Currently, if we find the zone in datasource successfully, we
-        # consider the zone is configured, and the current name server has
-        # authority for the specific zone.
-        # TODO: should get zone's configuration from cfgmgr or other place
-        # in future.
-        return sqlite3_ds.zone_exist(zonename, self._server.get_db_file())
-
     def _check_xfrout_available(self, zone_name):
         '''Check if xfr request can be responsed.
            TODO, Get zone's configuration from cfgmgr or some other place
            eg. check allow_transfer setting,
+
         '''
-        # If the current name server does not have authority for the
-        # zone, xfrout can't serve for it, return rcode NOTAUTH.
-        if not self._zone_exist(zone_name):
+
+        # Identify the data source for the requested zone and see if it has
+        # SOA while initializing objects used for request processing later.
+        # We should eventually generalize this so that we can choose the
+        # appropriate data source from (possible) multiple candidates.
+        # We should eventually take into account the RR class here.
+        # For now, we hardcode a particular type (SQLite3-based), and only
+        # consider that one.
+        datasrc_config = '{ "database_file": "' + \
+            self._server.get_db_file() + '"}'
+        self._datasrc_client = self.ClientClass('sqlite3', datasrc_config)
+        try:
+            # Note that we enable 'separate_rrs'.  In xfr-out we need to
+            # preserve as many things as possible (even if it's half broken)
+            # stored in the zone.
+            self._iterator = self._datasrc_client.get_iterator(zone_name,
+                                                               True)
+        except isc.datasrc.Error:
+            # If the current name server does not have authority for the
+            # zone, xfrout can't serve for it, return rcode NOTAUTH.
+            # Note: this exception can happen for other reasons.  We should
+            # update get_iterator() API so that we can distinguish "no such
+            # zone" and other cases (#1373).  For now we consider all these
+            # cases as NOTAUTH.
             return Rcode.NOTAUTH()
 
         # If we are an authoritative name server for the zone, but fail
         # to find the zone's SOA record in datasource, xfrout can't
         # provide zone transfer for it.
-        if not self._zone_has_soa(zone_name):
+        self._soa = self._iterator.get_soa()
+        if self._soa is None or self._soa.get_rdata_count() != 1:
             return Rcode.SERVFAIL()
 
-        #TODO, check allow_transfer
-        if not self._server.increase_transfers_counter():
-            return Rcode.REFUSED()
-
         return Rcode.NOERROR()
 
 
-    def dns_xfrout_start(self, sock_fd, msg_query):
+    def dns_xfrout_start(self, sock_fd, msg_query, quota_ok=True):
         rcode_, msg = self._parse_query_message(msg_query)
         #TODO. create query message and parse header
         if rcode_ is None: # Dropped by ACL
@@ -292,29 +350,40 @@ class XfroutSession():
         elif rcode_ != Rcode.NOERROR():
             return self._reply_query_with_error_rcode(msg, sock_fd,
                                                       Rcode.FORMERR())
+        elif not quota_ok:
+            logger.warn(XFROUT_QUERY_QUOTA_EXCCEEDED, self._request_type,
+                        format_addrinfo(self._remote),
+                        self._server._max_transfers_out)
+            return self._reply_query_with_error_rcode(msg, sock_fd,
+                                                      Rcode.REFUSED())
 
-        zone_name = self._get_query_zone_name(msg)
-        zone_class_str = self._get_query_zone_class(msg)
-        # TODO: should we not also include class in the check?
-        rcode_ = self._check_xfrout_available(zone_name)
+        question = msg.get_question()[0]
+        zone_name = question.get_name()
+        zone_class = question.get_class()
+        zone_str = format_zone_str(zone_name, zone_class) # for logging
 
+        # TODO: we should also include class in the check
+        try:
+            rcode_ = self._check_xfrout_available(zone_name)
+        except Exception as ex:
+            logger.error(XFROUT_XFR_TRANSFER_CHECK_ERROR, self._request_type,
+                         format_addrinfo(self._remote), zone_str, ex)
+            rcode_ = Rcode.SERVFAIL()
         if rcode_ != Rcode.NOERROR():
-            logger.info(XFROUT_AXFR_TRANSFER_FAILED, zone_name,
-                        zone_class_str, rcode_.to_text())
+            logger.info(XFROUT_AXFR_TRANSFER_FAILED, self._request_type,
+                        format_addrinfo(self._remote), zone_str, rcode_)
             return self._reply_query_with_error_rcode(msg, sock_fd, rcode_)
 
         try:
-            logger.info(XFROUT_AXFR_TRANSFER_STARTED, zone_name, zone_class_str)
-            self._reply_xfrout_query(msg, sock_fd, zone_name)
+            logger.info(XFROUT_AXFR_TRANSFER_STARTED, self._request_type,
+                        format_addrinfo(self._remote), zone_str)
+            self._reply_xfrout_query(msg, sock_fd)
         except Exception as err:
-            logger.error(XFROUT_AXFR_TRANSFER_ERROR, zone_name,
-                         zone_class_str, str(err))
+            logger.error(XFROUT_AXFR_TRANSFER_ERROR, self._request_type,
+                    format_addrinfo(self._remote), zone_str, err)
             pass
-        logger.info(XFROUT_AXFR_TRANSFER_DONE, zone_name, zone_class_str)
-
-        self._server.decrease_transfers_counter()
-        return
-
+        logger.info(XFROUT_AXFR_TRANSFER_DONE, self._request_type,
+                    format_addrinfo(self._remote), zone_str)
 
     def _clear_message(self, msg):
         qid = msg.get_qid()
@@ -329,16 +398,6 @@ class XfroutSession():
         msg.set_header_flag(Message.HEADERFLAG_QR)
         return msg
 
-    def _create_rrset_from_db_record(self, record):
-        '''Create one rrset from one record of datasource, if the schema of record is changed,
-        This function should be updated first.
-        '''
-        rrtype_ = RRType(record[5])
-        rdata_ = Rdata(rrtype_, RRClass("IN"), " ".join(record[7:]))
-        rrset_ = RRset(Name(record[2]), RRClass("IN"), rrtype_, RRTTL( int(record[4])))
-        rrset_.add_rdata(rdata_)
-        return rrset_
-
     def _send_message_with_last_soa(self, msg, sock_fd, rrset_soa, message_upper_len,
                                     count_since_last_tsig_sign):
         '''Add the SOA record to the end of message. If it can't be
@@ -361,33 +420,30 @@ class XfroutSession():
         self._send_message(sock_fd, msg, self._tsig_ctx)
 
 
-    def _reply_xfrout_query(self, msg, sock_fd, zone_name):
+    def _reply_xfrout_query(self, msg, sock_fd):
         #TODO, there should be a better way to insert rrset.
         count_since_last_tsig_sign = TSIG_SIGN_EVERY_NTH
         msg.make_response()
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        soa_record = sqlite3_ds.get_zone_soa(zone_name, self._server.get_db_file())
-        rrset_soa = self._create_rrset_from_db_record(soa_record)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        msg.add_rrset(Message.SECTION_ANSWER, self._soa)
 
-        message_upper_len = get_rrset_len(rrset_soa) + self._tsig_len
+        message_upper_len = get_rrset_len(self._soa) + self._tsig_len
 
-        for rr_data in sqlite3_ds.get_zone_datas(zone_name, self._server.get_db_file()):
-            if  self._server._shutdown_event.is_set(): # Check if xfrout is shutdown
+        for rrset in self._iterator:
+            # Check if xfrout is shutdown
+            if  self._server._shutdown_event.is_set():
                 logger.info(XFROUT_STOPPING)
                 return
-            # TODO: RRType.SOA() ?
-            if RRType(rr_data[5]) == RRType("SOA"): #ignore soa record
-                continue
 
-            rrset_ = self._create_rrset_from_db_record(rr_data)
+            if rrset.get_type() == RRType.SOA():
+                continue
 
             # We calculate the maximum size of the RRset (i.e. the
             # size without compression) and use that to see if we
             # may have reached the limit
-            rrset_len = get_rrset_len(rrset_)
+            rrset_len = get_rrset_len(rrset)
             if message_upper_len + rrset_len < XFROUT_MAX_MESSAGE_SIZE:
-                msg.add_rrset(Message.SECTION_ANSWER, rrset_)
+                msg.add_rrset(Message.SECTION_ANSWER, rrset)
                 message_upper_len += rrset_len
                 continue
 
@@ -400,7 +456,8 @@ class XfroutSession():
 
             count_since_last_tsig_sign += 1
             msg = self._clear_message(msg)
-            msg.add_rrset(Message.SECTION_ANSWER, rrset_) # Add the rrset to the new message
+            # Add the RRset to the new message
+            msg.add_rrset(Message.SECTION_ANSWER, rrset)
 
             # Reserve tsig space for signed packet
             if count_since_last_tsig_sign == TSIG_SIGN_EVERY_NTH:
@@ -408,7 +465,8 @@ class XfroutSession():
             else:
                 message_upper_len = rrset_len
 
-        self._send_message_with_last_soa(msg, sock_fd, rrset_soa, message_upper_len,
+        self._send_message_with_last_soa(msg, sock_fd, self._soa,
+                                         message_upper_len,
                                          count_since_last_tsig_sign)
 
 class UnixSockServer(socketserver_mixin.NoPollMixIn,
@@ -517,9 +575,12 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
         t.start()
 
     def _guess_remote(self, sock_fd):
-        """
-           Guess remote address and port of the socket. The sock_fd must be a
-           socket
+        """Guess remote address and port of the socket.
+
+        The sock_fd must be a file descriptor of a socket.
+        This method returns a 3-tuple consisting of address family,
+        socket type, and a 2-tuple with the address (string) and port (int).
+
         """
         # This uses a trick. If the socket is IPv4 in reality and we pretend
         # it to be IPv6, it returns IPv4 address anyway. This doesn't seem
@@ -531,11 +592,23 @@ class UnixSockServer(socketserver_mixin.NoPollMixIn,
             # To make it work even on hosts without IPv6 support
             # (Any idea how to simulate this in test?)
             sock = socket.fromfd(sock_fd, socket.AF_INET, socket.SOCK_STREAM)
-        return sock.getpeername()
+        peer = sock.getpeername()
+
+        # Identify the correct socket family.  Due to the above "trick",
+        # we cannot simply use sock.family.
+        family = socket.AF_INET6
+        try:
+            socket.inet_pton(socket.AF_INET6, peer[0])
+        except socket.error:
+            family = socket.AF_INET
+        return (family, socket.SOCK_STREAM, peer)
 
     def finish_request(self, sock_fd, request_data):
         '''Finish one request by instantiating RequestHandlerClass.
 
+        This is an entry point of a separate thread spawned in
+        UnixSockServer.process_request().
+
         This method creates a XfroutSession object.
         '''
         self._lock.acquire()
diff --git a/src/bin/xfrout/xfrout_messages.mes b/src/bin/xfrout/xfrout_messages.mes
index b2e432c..894ade5 100644
--- a/src/bin/xfrout/xfrout_messages.mes
+++ b/src/bin/xfrout/xfrout_messages.mes
@@ -15,17 +15,24 @@
 # No namespace declaration - these constants go in the global namespace
 # of the xfrout messages python module.
 
-% XFROUT_AXFR_TRANSFER_DONE transfer of %1/%2 complete
+% XFROUT_AXFR_TRANSFER_DONE %1 client %2: transfer of %3 complete
 The transfer of the given zone has been completed successfully, or was
 aborted due to a shutdown event.
 
-% XFROUT_AXFR_TRANSFER_ERROR error transferring zone %1/%2: %3
+% XFROUT_AXFR_TRANSFER_ERROR %1 client %2: error transferring zone %3: %4
 An uncaught exception was encountered while sending the response to
 an AXFR query. The error message of the exception is included in the
 log message, but this error most likely points to incomplete exception
 handling in the code.
 
-% XFROUT_AXFR_TRANSFER_FAILED transfer of %1/%2 failed, rcode: %3
+% XFROUT_XFR_TRANSFER_CHECK_ERROR %1 client %2: check for transfer of %3 failed: %4
+Pre-response check for an incoming XFR request failed unexpectedly.
+The most likely cause of this is a low-level error in the data
+source, but it may also be caused by other general (more unlikely)
+errors such as memory shortage.  Some detail of the error is also
+included in the message.  The xfrout server tries to return a
+SERVFAIL response in this case.
+
+% XFROUT_AXFR_TRANSFER_FAILED %1 client %2: transfer of %3 failed, rcode: %4
 A transfer out for the given zone failed. An error response is sent
 to the client. The given rcode is the rcode that is set in the error
 response. This is either NOTAUTH (we are not authoritative for the
@@ -36,7 +43,7 @@ Xfrout/max_transfers_out, has been reached).
 # Still a TODO, but when implemented, REFUSED can also mean
 # the client is not allowed to transfer the zone
 
-% XFROUT_AXFR_TRANSFER_STARTED transfer of zone %1/%2 has started
+% XFROUT_AXFR_TRANSFER_STARTED %1 client %2: transfer of zone %3 has started
 A transfer out of the given zone has started.
 
 % XFROUT_BAD_TSIG_KEY_STRING bad TSIG key string: %1
@@ -106,16 +113,27 @@ in the log message, but at this point no specific information other
 than that could be given. This points to incomplete exception handling
 in the code.
 
-% XFROUT_QUERY_DROPPED request to transfer %1/%2 to [%3]:%4 dropped
-The xfrout process silently dropped a request to transfer zone to given host.
-This is required by the ACLs. The %1 and %2 represent the zone name and class,
-the %3 and %4 the IP address and port of the peer requesting the transfer.
+% XFROUT_QUERY_DROPPED %1 client %2: request to transfer %3 dropped
+The xfrout process silently dropped a request to transfer zone to
+given host.  This is required by the ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
 
-% XFROUT_QUERY_REJECTED request to transfer %1/%2 to [%3]:%4 rejected
+% XFROUT_QUERY_REJECTED %1 client %2: request to transfer %3 rejected
 The xfrout process rejected (by REFUSED rcode) a request to transfer zone to
-given host. This is because of ACLs. The %1 and %2 represent the zone name and
-class, the %3 and %4 the IP address and port of the peer requesting the
-transfer.
+given host. This is because of ACLs.  The %2 represents the IP
+address and port of the peer requesting the transfer, and the %3
+represents the zone name and class.
+
+% XFROUT_QUERY_QUOTA_EXCCEEDED %1 client %2: request denied due to quota (%3)
+The xfr request was rejected because the server was already handling
+the maximum number of allowable transfers as specified in the transfers_out
+configuration parameter, which is also shown in the log message.  The
+request was immediately responded to and terminated with an RCODE of REFUSED.
+This can happen for a busy xfrout server, and you may want to increase
+this parameter; if the server is being too busy due to requests from
+unexpected clients you may want to restrict the legitimate clients
+with ACL.
 
 % XFROUT_RECEIVE_FILE_DESCRIPTOR_ERROR error receiving the file descriptor for an XFR connection
 There was an error receiving the file descriptor for the transfer
diff --git a/src/lib/datasrc/client.h b/src/lib/datasrc/client.h
index 2c3f709..18d04f7 100644
--- a/src/lib/datasrc/client.h
+++ b/src/lib/datasrc/client.h
@@ -215,18 +215,19 @@ public:
     ///
     /// \param name The name of zone apex to be traversed. It doesn't do
     ///     nearest match as findZone.
-    /// \param adjust_ttl If true, the iterator will treat RRs with the same
-    ///                   name and type but different TTL values to be of the
-    ///                   same RRset, and will adjust the TTL to the lowest
-    ///                   value found. If false, it will consider the RR to
-    ///                   belong to a different RRset.
+    /// \param separate_rrs If true, the iterator will return each RR as a
+    ///                     new RRset object. If false, the iterator will
+    ///                     combine consecutive RRs with the same name and type
+    ///                     into one RRset. The capitalization of the RRset will
+    ///                     be that of the first RR read, and TTLs will be
+    ///                     adjusted to the lowest one found.
     /// \return Pointer to the iterator.
     virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
-                                        bool adjust_ttl = true) const {
+                                        bool separate_rrs = false) const {
         // This is here to both document the parameter in doxygen (therefore it
         // needs a name) and avoid unused parameter warning.
         static_cast<void>(name);
-        static_cast<void>(adjust_ttl);
+        static_cast<void>(separate_rrs);
 
         isc_throw(isc::NotImplemented,
                   "Data source doesn't support iteration");
@@ -272,6 +273,22 @@ public:
     /// In such cases this method will result in an \c isc::NotImplemented
     /// exception unconditionally or when \c replace is false).
     ///
+    /// If \c journaling is true, the data source should store a journal
+    /// of changes. These can be used later on by, for example, IXFR-out.
+    /// However, the parameter is a hint only. It might be unable to store
+    /// them and they would be silently discarded. Or it might need to
+    /// store them no matter what (for example a git-based data source would
+    /// store journal implicitly). When the \c journaling is true, it
+    /// requires that the following update be formatted as an IXFR transfer
+    /// (SOA to be removed, bunch of RRs to be removed, SOA to be added,
+    /// bunch of RRs to be added, and possibly repeated). However, it is not
+    /// required that the updater checks that. If it is false, it must not
+    /// require so and must accept any order of changes.
+    ///
+    /// We don't support erasing the whole zone (by replace being true) and
+    /// saving a journal at the same time. In such a situation, BadValue is
+    /// thrown.
+    ///
     /// \note To avoid throwing the exception accidentally with a lazy
     /// implementation, we still keep this method pure virtual without
     /// an implementation.  All derived classes must explicitly define this
@@ -282,14 +299,18 @@ public:
     /// \exception DataSourceError Internal error in the underlying data
     /// source.
     /// \exception std::bad_alloc Resource allocation failure.
+    /// \exception BadValue if both replace and journaling are true.
     ///
     /// \param name The zone name to be updated
     /// \param replace Whether to delete existing RRs before making updates
+    /// \param journaling The zone updater should store a journal of the
+    ///     changes.
     ///
     /// \return A pointer to the updater; it will be NULL if the specified
     /// zone isn't found.
     virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
-                                      bool replace) const = 0;
+                                      bool replace, bool journaling = false)
+        const = 0;
 };
 }
 }
diff --git a/src/lib/datasrc/data_source.h b/src/lib/datasrc/data_source.h
index a7a15a9..c35f0d3 100644
--- a/src/lib/datasrc/data_source.h
+++ b/src/lib/datasrc/data_source.h
@@ -53,6 +53,18 @@ public:
         isc::Exception(file, line, what) {}
 };
 
+/// \brief No such serial number when obtaining difference iterator
+///
+/// Thrown if either the zone/start serial number or zone/end serial number
+/// combination does not exist in the differences table.  (Note that this
+/// includes the case where the differences table contains no records related
+/// to that zone.)
+class NoSuchSerial : public DataSourceError {
+public:
+    NoSuchSerial(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
 
 class AbstractDataSrc {
     ///
diff --git a/src/lib/datasrc/database.cc b/src/lib/datasrc/database.cc
index f06cdc0..0e867ba 100644
--- a/src/lib/datasrc/database.cc
+++ b/src/lib/datasrc/database.cc
@@ -707,11 +707,11 @@ public:
     DatabaseIterator(shared_ptr<DatabaseAccessor> accessor,
                      const Name& zone_name,
                      const RRClass& rrclass,
-                     bool adjust_ttl) :
+                     bool separate_rrs) :
         accessor_(accessor),
         class_(rrclass),
         ready_(true),
-        adjust_ttl_(adjust_ttl)
+        separate_rrs_(separate_rrs)
     {
         // Get the zone
         const pair<bool, int> zone(accessor_->getZone(zone_name.toText()));
@@ -769,20 +769,19 @@ public:
         const RRType rtype(rtype_str);
         RRsetPtr rrset(new RRset(name, class_, rtype, RRTTL(ttl)));
         while (data_ready_ && name_ == name_str && rtype_str == rtype_) {
-            if (adjust_ttl_) {
-                if (ttl_ != ttl) {
-                    if (ttl < ttl_) {
-                        ttl_ = ttl;
-                        rrset->setTTL(RRTTL(ttl));
-                    }
-                    LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
-                        arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
+            if (ttl_ != ttl) {
+                if (ttl < ttl_) {
+                    ttl_ = ttl;
+                    rrset->setTTL(RRTTL(ttl));
                 }
-            } else if (ttl_ != ttl) {
-                break;
+                LOG_WARN(logger, DATASRC_DATABASE_ITERATE_TTL_MISMATCH).
+                    arg(name_).arg(class_).arg(rtype_).arg(rrset->getTTL());
             }
             rrset->addRdata(rdata::createRdata(rtype, class_, rdata_));
             getData();
+            if (separate_rrs_) {
+                break;
+            }
         }
         LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE_NEXT).
             arg(rrset->getName()).arg(rrset->getType());
@@ -814,18 +813,18 @@ private:
     string name_, rtype_, rdata_, ttl_;
     // Whether to modify differing TTL values, or treat a different TTL as
     // a different RRset
-    bool adjust_ttl_;
+    bool separate_rrs_;
 };
 
 }
 
 ZoneIteratorPtr
 DatabaseClient::getIterator(const isc::dns::Name& name,
-                            bool adjust_ttl) const
+                            bool separate_rrs) const
 {
     ZoneIteratorPtr iterator = ZoneIteratorPtr(new DatabaseIterator(
                                                    accessor_->clone(), name,
-                                                   rrclass_, adjust_ttl));
+                                                   rrclass_, separate_rrs));
     LOG_DEBUG(logger, DBG_TRACE_DETAILED, DATASRC_DATABASE_ITERATE).
         arg(name);
 
@@ -838,10 +837,12 @@ DatabaseClient::getIterator(const isc::dns::Name& name,
 class DatabaseUpdater : public ZoneUpdater {
 public:
     DatabaseUpdater(shared_ptr<DatabaseAccessor> accessor, int zone_id,
-            const Name& zone_name, const RRClass& zone_class) :
+            const Name& zone_name, const RRClass& zone_class,
+            bool journaling) :
         committed_(false), accessor_(accessor), zone_id_(zone_id),
         db_name_(accessor->getDBName()), zone_name_(zone_name.toText()),
-        zone_class_(zone_class),
+        zone_class_(zone_class), journaling_(journaling),
+        diff_phase_(NOT_STARTED),
         finder_(new DatabaseClient::Finder(accessor_, zone_id_, zone_name))
     {
         logger.debug(DBG_TRACE_DATA, DATASRC_DATABASE_UPDATER_CREATED)
@@ -877,45 +878,97 @@ public:
     virtual void commit();
 
 private:
+    // A short cut typedef only for making the code shorter.
+    typedef DatabaseAccessor Accessor;
+
     bool committed_;
     shared_ptr<DatabaseAccessor> accessor_;
     const int zone_id_;
     const string db_name_;
     const string zone_name_;
     const RRClass zone_class_;
+    const bool journaling_;
+    // For the journals
+    enum DiffPhase {
+        NOT_STARTED,
+        DELETE,
+        ADD
+    };
+    DiffPhase diff_phase_;
+    uint32_t serial_;
     boost::scoped_ptr<DatabaseClient::Finder> finder_;
+
+    // This is a set of validation checks commonly used for addRRset() and
+    // deleteRRset to minimize duplicate code logic and to make the main
+    // code concise.
+    void validateAddOrDelete(const char* const op_str, const RRset& rrset,
+                             DiffPhase prev_phase,
+                             DiffPhase current_phase) const;
 };
 
 void
-DatabaseUpdater::addRRset(const RRset& rrset) {
+DatabaseUpdater::validateAddOrDelete(const char* const op_str,
+                                     const RRset& rrset,
+                                     DiffPhase prev_phase,
+                                     DiffPhase current_phase) const
+{
     if (committed_) {
-        isc_throw(DataSourceError, "Add attempt after commit to zone: "
+        isc_throw(DataSourceError, op_str << " attempt after commit to zone: "
                   << zone_name_ << "/" << zone_class_);
     }
+    if (rrset.getRdataCount() == 0) {
+        isc_throw(DataSourceError, op_str << " attempt with an empty RRset: "
+                  << rrset.getName() << "/" << zone_class_ << "/"
+                  << rrset.getType());
+    }
     if (rrset.getClass() != zone_class_) {
-        isc_throw(DataSourceError, "An RRset of a different class is being "
-                  << "added to " << zone_name_ << "/" << zone_class_ << ": "
+        isc_throw(DataSourceError, op_str << " attempt for a different class "
+                  << zone_name_ << "/" << zone_class_ << ": "
                   << rrset.toText());
     }
     if (rrset.getRRsig()) {
-        isc_throw(DataSourceError, "An RRset with RRSIG is being added to "
+        isc_throw(DataSourceError, op_str << " attempt for RRset with RRSIG "
                   << zone_name_ << "/" << zone_class_ << ": "
                   << rrset.toText());
     }
+    if (journaling_) {
+        const RRType rrtype(rrset.getType());
+        if (rrtype == RRType::SOA() && diff_phase_ != prev_phase) {
+            isc_throw(isc::BadValue, op_str << " attempt in an invalid "
+                      << "diff phase: " << diff_phase_ << ", rrset: " <<
+                      rrset.toText());
+        }
+        if (rrtype != RRType::SOA() && diff_phase_ != current_phase) {
+            isc_throw(isc::BadValue, "diff state change by non SOA: "
+                      << rrset.toText());
+        }
+    }
+}
 
+void
+DatabaseUpdater::addRRset(const RRset& rrset) {
+    validateAddOrDelete("add", rrset, DELETE, ADD);
+
+    // It's guaranteed rrset has at least one RDATA at this point.
     RdataIteratorPtr it = rrset.getRdataIterator();
-    if (it->isLast()) {
-        isc_throw(DataSourceError, "An empty RRset is being added for "
-                  << rrset.getName() << "/" << zone_class_ << "/"
-                  << rrset.getType());
-    }
 
-    string columns[DatabaseAccessor::ADD_COLUMN_COUNT]; // initialized with ""
-    columns[DatabaseAccessor::ADD_NAME] = rrset.getName().toText();
-    columns[DatabaseAccessor::ADD_REV_NAME] =
-        rrset.getName().reverse().toText();
-    columns[DatabaseAccessor::ADD_TTL] = rrset.getTTL().toText();
-    columns[DatabaseAccessor::ADD_TYPE] = rrset.getType().toText();
+    string columns[Accessor::ADD_COLUMN_COUNT]; // initialized with ""
+    columns[Accessor::ADD_NAME] = rrset.getName().toText();
+    columns[Accessor::ADD_REV_NAME] = rrset.getName().reverse().toText();
+    columns[Accessor::ADD_TTL] = rrset.getTTL().toText();
+    columns[Accessor::ADD_TYPE] = rrset.getType().toText();
+    string journal[Accessor::DIFF_PARAM_COUNT];
+    if (journaling_) {
+        journal[Accessor::DIFF_NAME] = columns[Accessor::ADD_NAME];
+        journal[Accessor::DIFF_TYPE] = columns[Accessor::ADD_TYPE];
+        journal[Accessor::DIFF_TTL] = columns[Accessor::ADD_TTL];
+        diff_phase_ = ADD;
+        if (rrset.getType() == RRType::SOA()) {
+            serial_ =
+                dynamic_cast<const generic::SOA&>(it->getCurrent()).
+                getSerial();
+        }
+    }
     for (; !it->isLast(); it->next()) {
         if (rrset.getType() == RRType::RRSIG()) {
             // XXX: the current interface (based on the current sqlite3
@@ -925,43 +978,53 @@ DatabaseUpdater::addRRset(const RRset& rrset) {
             // the interface, but until then we have to conform to the schema.
             const generic::RRSIG& rrsig_rdata =
                 dynamic_cast<const generic::RRSIG&>(it->getCurrent());
-            columns[DatabaseAccessor::ADD_SIGTYPE] =
+            columns[Accessor::ADD_SIGTYPE] =
                 rrsig_rdata.typeCovered().toText();
         }
-        columns[DatabaseAccessor::ADD_RDATA] = it->getCurrent().toText();
+        columns[Accessor::ADD_RDATA] = it->getCurrent().toText();
+        if (journaling_) {
+            journal[Accessor::DIFF_RDATA] = columns[Accessor::ADD_RDATA];
+            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_ADD,
+                                     journal);
+        }
         accessor_->addRecordToZone(columns);
     }
 }
 
 void
 DatabaseUpdater::deleteRRset(const RRset& rrset) {
-    if (committed_) {
-        isc_throw(DataSourceError, "Delete attempt after commit on zone: "
-                  << zone_name_ << "/" << zone_class_);
-    }
-    if (rrset.getClass() != zone_class_) {
-        isc_throw(DataSourceError, "An RRset of a different class is being "
-                  << "deleted from " << zone_name_ << "/" << zone_class_
-                  << ": " << rrset.toText());
-    }
-    if (rrset.getRRsig()) {
-        isc_throw(DataSourceError, "An RRset with RRSIG is being deleted from "
-                  << zone_name_ << "/" << zone_class_ << ": "
-                  << rrset.toText());
+    // If this is the first operation, pretend we are starting a new delete
+    // sequence after adds.  This will simplify the validation below.
+    if (diff_phase_ == NOT_STARTED) {
+        diff_phase_ = ADD;
     }
 
+    validateAddOrDelete("delete", rrset, ADD, DELETE);
+
     RdataIteratorPtr it = rrset.getRdataIterator();
-    if (it->isLast()) {
-        isc_throw(DataSourceError, "An empty RRset is being deleted for "
-                  << rrset.getName() << "/" << zone_class_ << "/"
-                  << rrset.getType());
-    }
 
-    string params[DatabaseAccessor::DEL_PARAM_COUNT]; // initialized with ""
-    params[DatabaseAccessor::DEL_NAME] = rrset.getName().toText();
-    params[DatabaseAccessor::DEL_TYPE] = rrset.getType().toText();
+    string params[Accessor::DEL_PARAM_COUNT]; // initialized with ""
+    params[Accessor::DEL_NAME] = rrset.getName().toText();
+    params[Accessor::DEL_TYPE] = rrset.getType().toText();
+    string journal[Accessor::DIFF_PARAM_COUNT];
+    if (journaling_) {
+        journal[Accessor::DIFF_NAME] = params[Accessor::DEL_NAME];
+        journal[Accessor::DIFF_TYPE] = params[Accessor::DEL_TYPE];
+        journal[Accessor::DIFF_TTL] = rrset.getTTL().toText();
+        diff_phase_ = DELETE;
+        if (rrset.getType() == RRType::SOA()) {
+            serial_ =
+                dynamic_cast<const generic::SOA&>(it->getCurrent()).
+                getSerial();
+        }
+    }
     for (; !it->isLast(); it->next()) {
-        params[DatabaseAccessor::DEL_RDATA] = it->getCurrent().toText();
+        params[Accessor::DEL_RDATA] = it->getCurrent().toText();
+        if (journaling_) {
+            journal[Accessor::DIFF_RDATA] = params[Accessor::DEL_RDATA];
+            accessor_->addRecordDiff(zone_id_, serial_, Accessor::DIFF_DELETE,
+                                     journal);
+        }
         accessor_->deleteRecordInZone(params);
     }
 }
@@ -973,6 +1036,9 @@ DatabaseUpdater::commit() {
                   << zone_name_ << "/" << zone_class_ << " on "
                   << db_name_);
     }
+    if (journaling_ && diff_phase_ == DELETE) {
+        isc_throw(isc::BadValue, "Update sequence not complete");
+    }
     accessor_->commit();
     committed_ = true; // make sure the destructor won't trigger rollback
 
@@ -986,7 +1052,13 @@ DatabaseUpdater::commit() {
 
 // The updater factory
 ZoneUpdaterPtr
-DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
+DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace,
+                           bool journaling) const
+{
+    if (replace && journaling) {
+        isc_throw(isc::BadValue, "Can't store journal and replace the whole "
+                  "zone at the same time");
+    }
     shared_ptr<DatabaseAccessor> update_accessor(accessor_->clone());
     const std::pair<bool, int> zone(update_accessor->startUpdateZone(
                                         name.toText(), replace));
@@ -995,7 +1067,7 @@ DatabaseClient::getUpdater(const isc::dns::Name& name, bool replace) const {
     }
 
     return (ZoneUpdaterPtr(new DatabaseUpdater(update_accessor, zone.second,
-                                               name, rrclass_)));
+                                               name, rrclass_, journaling)));
 }
 }
 }
diff --git a/src/lib/datasrc/database.h b/src/lib/datasrc/database.h
index b3fda6d..2ac10e7 100644
--- a/src/lib/datasrc/database.h
+++ b/src/lib/datasrc/database.h
@@ -274,6 +274,56 @@ public:
      */
     virtual IteratorContextPtr getAllRecords(int id) const = 0;
 
+    /**
+     * \brief Creates an iterator context for a set of differences.
+     *
+     * Returns an IteratorContextPtr that contains all difference records for
+     * the given zone between two versions of a zone.
+     *
+     * The difference records are the set of records that would appear in an
+     * IXFR serving a request for the difference between two versions of a zone.
+     * The records are returned in the same order as they would be in the IXFR.
+     * This means that if the difference between versions of a zone with SOA
+     * serial numbers of "start" and "end" is required, and the zone contains
+     * the differences between serial number "start" to serial number
+     * "intermediate" and from serial number "intermediate" to serial number
+     * "end", the returned records will be (in order):
+     *
+     * \li SOA for serial "start"
+     * \li Records removed from the zone between versions "start" and
+     *     "intermediate" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "intermediate"
+     * \li Records added to the zone between versions "start" and
+     *     "intermediate" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "intermediate"
+     * \li Records removed from the zone between versions "intermediate" and
+     *     "end" of the zone.  The order of these is not guaranteed.
+     * \li SOA for serial "end"
+     * \li Records added to the zone between versions "intermediate" and "end"
+     *     of the zone. The order of these is not guaranteed.
+     *
+     * Note that there is no requirement that "start" be less than "end". Owing
+     * to serial number arithmetic, it is entirely possible that a later version
+     * of a zone will have a smaller SOA serial number than an earlier version.
+     *
+     * Each call to getNext() on the returned iterator should copy all
+     * column fields of the array that is passed, as defined in the
+     * RecordColumns enum.
+     *
+     * \exception any Since any implementation can be used, the caller should
+     *                expect any exception to be thrown.
+     *
+     * \param id The ID of the zone, returned from getZone().
+     * \param start The SOA serial number of the version of the zone from
+     *        which the difference sequence should start.
+     * \param end The SOA serial number of the version of the zone at which
+     *        the difference sequence should end.
+     *
+     * \return Newly created iterator context. Must not be NULL.
+     */
+    virtual IteratorContextPtr
+    getDiffs(int id, uint32_t start, uint32_t end) const = 0;
+
     /// Start a transaction for updating a zone.
     ///
     /// Each derived class version of this method starts a database
@@ -863,22 +913,24 @@ public:
      * \exception Anything else the underlying DatabaseConnection might
      *     want to throw.
      * \param name The origin of the zone to iterate.
-     * \param adjust_ttl If true, the iterator will treat RRs with the same
-     *                   name and type but different TTL values to be of the
-     *                   same RRset, and will adjust the TTL to the lowest
-     *                   value found. If false, it will consider the RR to
-     *                   belong to a different RRset.
+     * \param separate_rrs If true, the iterator will return each RR as a
+     *                     new RRset object. If false, the iterator will
+     *                     combine consecutive RRs with the same name and type
+     *                     into one RRset. The capitalization of the RRset will
+     *                     be that of the first RR read, and TTLs will be
+     *                     adjusted to the lowest one found.
      * \return Shared pointer to the iterator (it will never be NULL)
      */
     virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
-                                        bool adjust_ttl = true) const;
+                                        bool separate_rrs = false) const;
 
     /// This implementation internally clones the accessor from the one
     /// used in the client and starts a separate transaction using the cloned
     /// accessor.  The returned updater will be able to work separately from
     /// the original client.
     virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
-                                      bool replace) const;
+                                      bool replace,
+                                      bool journaling = false) const;
 
 private:
     /// \brief The RR class that this client handles.
diff --git a/src/lib/datasrc/memory_datasrc.cc b/src/lib/datasrc/memory_datasrc.cc
index 6c0f589..1bf17f6 100644
--- a/src/lib/datasrc/memory_datasrc.cc
+++ b/src/lib/datasrc/memory_datasrc.cc
@@ -729,10 +729,14 @@ private:
     Domain::const_iterator dom_iterator_;
     const DomainTree& tree_;
     const DomainNode* node_;
+    // Only used when separate_rrs_ is true
+    RdataIteratorPtr rdata_iterator_;
+    bool separate_rrs_;
     bool ready_;
 public:
-    MemoryIterator(const DomainTree& tree, const Name& origin) :
+    MemoryIterator(const DomainTree& tree, const Name& origin, bool separate_rrs) :
         tree_(tree),
+        separate_rrs_(separate_rrs),
         ready_(true)
     {
         // Find the first node (origin) and preserve the node chain for future
@@ -747,6 +751,9 @@ public:
         // Initialize the iterator if there's somewhere to point to
         if (node_ != NULL && node_->getData() != DomainPtr()) {
             dom_iterator_ = node_->getData()->begin();
+            if (separate_rrs_ && dom_iterator_ != node_->getData()->end()) {
+                rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+            }
         }
     }
 
@@ -766,6 +773,10 @@ public:
             // if the map is empty or not
             if (node_ != NULL && node_->getData() != NULL) {
                 dom_iterator_ = node_->getData()->begin();
+                // New RRset, so get a new rdata iterator
+                if (separate_rrs_) {
+                    rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+                }
             }
         }
         if (node_ == NULL) {
@@ -773,12 +784,35 @@ public:
             ready_ = false;
             return (ConstRRsetPtr());
         }
-        // The iterator points to the next yet unused RRset now
-        ConstRRsetPtr result(dom_iterator_->second);
-        // This one is used, move it to the next time for next call
-        ++dom_iterator_;
 
-        return (result);
+        if (separate_rrs_) {
+            // For separate rrs, reconstruct a new RRset with just the
+            // 'current' rdata
+            RRsetPtr result(new RRset(dom_iterator_->second->getName(),
+                                      dom_iterator_->second->getClass(),
+                                      dom_iterator_->second->getType(),
+                                      dom_iterator_->second->getTTL()));
+            result->addRdata(rdata_iterator_->getCurrent());
+            rdata_iterator_->next();
+            if (rdata_iterator_->isLast()) {
+                // all used up, next.
+                ++dom_iterator_;
+                // New RRset, so get a new rdata iterator, but only if this
+                // was not the final RRset in the chain
+                if (dom_iterator_ != node_->getData()->end()) {
+                    rdata_iterator_ = dom_iterator_->second->getRdataIterator();
+                }
+            }
+            return (result);
+        } else {
+            // The iterator points to the next yet unused RRset now
+            ConstRRsetPtr result(dom_iterator_->second);
+
+            // This one is used, move it to the next time for next call
+            ++dom_iterator_;
+
+            return (result);
+        }
     }
 
     virtual ConstRRsetPtr getSOA() const {
@@ -789,11 +823,7 @@ public:
 } // End of anonymous namespace
 
 ZoneIteratorPtr
-InMemoryClient::getIterator(const Name& name, bool) const {
-    // note: adjust_ttl argument is ignored, as the RRsets are already
-    // individually stored, and hence cannot have different TTLs anymore at
-    // this point
-
+InMemoryClient::getIterator(const Name& name, bool separate_rrs) const {
     ZoneTable::FindResult result(impl_->zone_table.findZone(name));
     if (result.code != result::SUCCESS) {
         isc_throw(DataSourceError, "No such zone: " + name.toText());
@@ -811,11 +841,12 @@ InMemoryClient::getIterator(const Name& name, bool) const {
         isc_throw(Unexpected, "The zone at " + name.toText() +
                   " is not InMemoryZoneFinder");
     }
-    return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name)));
+    return (ZoneIteratorPtr(new MemoryIterator(zone->impl_->domains_, name,
+                                               separate_rrs)));
 }
 
 ZoneUpdaterPtr
-InMemoryClient::getUpdater(const isc::dns::Name&, bool) const {
+InMemoryClient::getUpdater(const isc::dns::Name&, bool, bool) const {
     isc_throw(isc::NotImplemented, "Update attempt on in memory data source");
 }
 
diff --git a/src/lib/datasrc/memory_datasrc.h b/src/lib/datasrc/memory_datasrc.h
index 1b6c120..a9764fe 100644
--- a/src/lib/datasrc/memory_datasrc.h
+++ b/src/lib/datasrc/memory_datasrc.h
@@ -273,7 +273,7 @@ public:
 
     /// \brief Implementation of the getIterator method
     virtual ZoneIteratorPtr getIterator(const isc::dns::Name& name,
-                                        bool adjust_ttl = true) const;
+                                        bool separate_rrs = false) const;
 
     /// In-memory data source is read-only, so this derived method will
     /// result in a NotImplemented exception.
@@ -284,7 +284,8 @@ public:
     /// to update via its updater (this may or may not be a good idea and
     /// is subject to further discussions).
     virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name& name,
-                                      bool replace) const;
+                                      bool replace, bool journaling = false)
+        const;
 
 private:
     // TODO: Do we still need the PImpl if nobody should manipulate this class
diff --git a/src/lib/datasrc/sqlite3_accessor.cc b/src/lib/datasrc/sqlite3_accessor.cc
index 01b9f41..fb2ffef 100644
--- a/src/lib/datasrc/sqlite3_accessor.cc
+++ b/src/lib/datasrc/sqlite3_accessor.cc
@@ -23,6 +23,7 @@
 #include <datasrc/logger.h>
 #include <datasrc/data_source.h>
 #include <datasrc/factory.h>
+#include <datasrc/database.h>
 #include <util/filename.h>
 
 using namespace std;
@@ -54,7 +55,10 @@ enum StatementID {
     FIND_PREVIOUS = 10,
     ADD_RECORD_DIFF = 11,
     GET_RECORD_DIFF = 12,       // This is temporary for testing "add diff"
-    NUM_STATEMENTS = 13
+    LOW_DIFF_ID = 13,
+    HIGH_DIFF_ID = 14,
+    DIFF_RECS = 15,
+    NUM_STATEMENTS = 16
 };
 
 const char* const text_statements[NUM_STATEMENTS] = {
@@ -62,33 +66,48 @@ const char* const text_statements[NUM_STATEMENTS] = {
     // specifically chosen to match the enum values in RecordColumns
     "SELECT id FROM zones WHERE name=?1 AND rdclass = ?2", // ZONE
     "SELECT rdtype, ttl, sigtype, rdata FROM records "     // ANY
-    "WHERE zone_id=?1 AND name=?2",
+        "WHERE zone_id=?1 AND name=?2",
     "SELECT rdtype, ttl, sigtype, rdata " // ANY_SUB
-    "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
+        "FROM records WHERE zone_id=?1 AND name LIKE (\"%.\" || ?2)",
     "BEGIN",                    // BEGIN
     "COMMIT",                   // COMMIT
     "ROLLBACK",                 // ROLLBACK
     "DELETE FROM records WHERE zone_id=?1", // DEL_ZONE_RECORDS
     "INSERT INTO records "      // ADD_RECORD
-    "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
-    "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+        "(zone_id, name, rname, ttl, rdtype, sigtype, rdata) "
+        "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
     "DELETE FROM records WHERE zone_id=?1 AND name=?2 " // DEL_RECORD
-    "AND rdtype=?3 AND rdata=?4",
+        "AND rdtype=?3 AND rdata=?4",
     "SELECT rdtype, ttl, sigtype, rdata, name FROM records " // ITERATE
-    "WHERE zone_id = ?1 ORDER BY rname, rdtype",
+        "WHERE zone_id = ?1 ORDER BY rname, rdtype",
     /*
      * This one looks for previous name with NSEC record. It is done by
      * using the reversed name. The NSEC is checked because we need to
      * skip glue data, which don't have the NSEC.
      */
     "SELECT name FROM records " // FIND_PREVIOUS
-    "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
-    "rname < $2 ORDER BY rname DESC LIMIT 1",
+        "WHERE zone_id=?1 AND rdtype = 'NSEC' AND "
+        "rname < $2 ORDER BY rname DESC LIMIT 1",
     "INSERT INTO diffs "        // ADD_RECORD_DIFF
-    "(zone_id, version, operation, name, rrtype, ttl, rdata) "
-    "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)"
-    , "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
-    "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation"
+        "(zone_id, version, operation, name, rrtype, ttl, rdata) "
+        "VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
+    "SELECT name, rrtype, ttl, rdata, version, operation " // GET_RECORD_DIFF
+        "FROM diffs WHERE zone_id = ?1 ORDER BY id, operation",
+
+    // Two statements to select the lowest ID and highest ID in a set of
+    // differences.
+    "SELECT id FROM diffs "     // LOW_DIFF_ID
+        "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+        "ORDER BY id ASC LIMIT 1",
+    "SELECT id FROM diffs "     // HIGH_DIFF_ID
+        "WHERE zone_id=?1 AND version=?2 and OPERATION=?3 "
+        "ORDER BY id DESC LIMIT 1",
+
+    // In the next statement, note the redundant ID.  This is to ensure
+    // that the columns match the column IDs passed to the iterator
+    "SELECT rrtype, ttl, id, rdata, name FROM diffs "   // DIFF_RECS
+        "WHERE zone_id=?1 AND id>=?2 and id<=?3 "
+        "ORDER BY id ASC"
 };
 
 struct SQLite3Parameters {
@@ -231,23 +250,26 @@ const char* const SCHEMA_LIST[] = {
     "dnssec BOOLEAN NOT NULL DEFAULT 0)",
     "CREATE INDEX zones_byname ON zones (name)",
     "CREATE TABLE records (id INTEGER PRIMARY KEY, "
-    "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
-    "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
-    "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
-    "rdata STRING NOT NULL)",
+        "zone_id INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
+        "rname STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
+        "rdtype STRING NOT NULL COLLATE NOCASE, sigtype STRING COLLATE NOCASE, "
+        "rdata STRING NOT NULL)",
     "CREATE INDEX records_byname ON records (name)",
     "CREATE INDEX records_byrname ON records (rname)",
     "CREATE TABLE nsec3 (id INTEGER PRIMARY KEY, zone_id INTEGER NOT NULL, "
-    "hash STRING NOT NULL COLLATE NOCASE, "
-    "owner STRING NOT NULL COLLATE NOCASE, "
-    "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
-    "rdata STRING NOT NULL)",
+        "hash STRING NOT NULL COLLATE NOCASE, "
+        "owner STRING NOT NULL COLLATE NOCASE, "
+        "ttl INTEGER NOT NULL, rdtype STRING NOT NULL COLLATE NOCASE, "
+        "rdata STRING NOT NULL)",
     "CREATE INDEX nsec3_byhash ON nsec3 (hash)",
     "CREATE TABLE diffs (id INTEGER PRIMARY KEY, "
-    "zone_id INTEGER NOT NULL, version INTEGER NOT NULL, "
-    "operation INTEGER NOT NULL, name STRING NOT NULL COLLATE NOCASE, "
-    "rrtype STRING NOT NULL COLLATE NOCASE, ttl INTEGER NOT NULL, "
-    "rdata STRING NOT NULL)",
+        "zone_id INTEGER NOT NULL, "
+        "version INTEGER NOT NULL, "
+        "operation INTEGER NOT NULL, "
+        "name STRING NOT NULL COLLATE NOCASE, "
+        "rrtype STRING NOT NULL COLLATE NOCASE, "
+        "ttl INTEGER NOT NULL, "
+        "rdata STRING NOT NULL)",
     NULL
 };
 
@@ -558,6 +580,9 @@ private:
     const std::string name_;
 };
 
+
+// Methods to retrieve the various iterators
+
 DatabaseAccessor::IteratorContextPtr
 SQLite3Accessor::getRecords(const std::string& name, int id,
                             bool subdomains) const
@@ -571,6 +596,257 @@ SQLite3Accessor::getAllRecords(int id) const {
     return (IteratorContextPtr(new Context(shared_from_this(), id)));
 }
 
+
+/// \brief Difference Iterator
+///
+/// This iterator is used to search through the differences table for the
+/// resource records making up an IXFR between two versions of a zone.
+
+class SQLite3Accessor::DiffContext : public DatabaseAccessor::IteratorContext {
+public:
+
+    /// \brief Constructor
+    ///
+    /// Constructs the iterator for the difference sequence.  It is
+    /// passed two parameters, the first and last versions in the difference
+    /// sequence.  Note that because of serial number rollover, it may well
+    /// be that the start serial number is greater than the end one.
+    ///
+    /// \param zone_id ID of the zone (in the zone table)
+    /// \param start Serial number of first version in difference sequence
+    /// \param end Serial number of last version in difference sequence
+    ///
+    /// \exception any A number of exceptions can be expected
+    DiffContext(const boost::shared_ptr<const SQLite3Accessor>& accessor,
+                int zone_id, uint32_t start, uint32_t end) :
+        accessor_(accessor),
+        last_status_(SQLITE_ROW)
+    {
+        try {
+            int low_id = findIndex(LOW_DIFF_ID, zone_id, start, DIFF_DELETE);
+            int high_id = findIndex(HIGH_DIFF_ID, zone_id, end, DIFF_ADD);
+
+            // Prepare the statement that will return data values
+            reset(DIFF_RECS);
+            bindInt(DIFF_RECS, 1, zone_id);
+            bindInt(DIFF_RECS, 2, low_id);
+            bindInt(DIFF_RECS, 3, high_id);
+
+        } catch (...) {
+            // Something wrong, clear up everything.
+            accessor_->dbparameters_->finalizeStatements();
+            throw;
+        }
+    }
+
+    /// \brief Destructor
+    virtual ~DiffContext()
+    {}
+
+    /// \brief Get Next Diff Record
+    ///
+    /// Returns the next difference record in the difference sequence.
+    ///
+    /// \param data Array of std::strings COLUMN_COUNT long.  The results
+    ///        are returned in this.
+    ///
+    /// \return bool true if data is returned, false if not.
+    ///
+    /// \exception any A variety of exceptions may be thrown
+    bool getNext(std::string (&data)[COLUMN_COUNT]) {
+
+        if (last_status_ != SQLITE_DONE) {
+            // Last call (if any) didn't reach end of result set, so we
+            // can read another row from it.
+            //
+            // Get a pointer to the statement for brevity (this does not
+            // transfer ownership of the statement to this class, so there is
+            // no need to tidy up after we have finished using it).
+            sqlite3_stmt* stmt =
+                accessor_->dbparameters_->getStatement(DIFF_RECS);
+
+            const int rc(sqlite3_step(stmt));
+            if (rc == SQLITE_ROW) {
+                // Copy the data across to the output array
+                copyColumn(DIFF_RECS, data, TYPE_COLUMN);
+                copyColumn(DIFF_RECS, data, TTL_COLUMN);
+                copyColumn(DIFF_RECS, data, NAME_COLUMN);
+                copyColumn(DIFF_RECS, data, RDATA_COLUMN);
+
+            } else if (rc != SQLITE_DONE) {
+                isc_throw(DataSourceError,
+                          "Unexpected failure in sqlite3_step: " <<
+                          sqlite3_errmsg(accessor_->dbparameters_->db_));
+            }
+            last_status_ = rc;
+        }
+        return (last_status_ == SQLITE_ROW);
+    }
+
+private:
+
+    /// \brief Reset prepared statement
+    ///
+    /// Sets up the statement so that new parameters can be attached to it and
+    /// that it can be used to query for another difference sequence.
+    ///
+    /// \param stindex Index of prepared statement to which to bind
+    void reset(int stindex) {
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+        if ((sqlite3_reset(stmt) != SQLITE_OK) ||
+            (sqlite3_clear_bindings(stmt) != SQLITE_OK)) {
+            isc_throw(SQLite3Error, "Could not clear statement bindings in '" <<
+                      text_statements[stindex] << "': " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+    }
+
+    /// \brief Bind Int
+    ///
+    /// Binds an integer to a specific variable in a prepared statement.
+    ///
+    /// \param stindex Index of prepared statement to which to bind
+    /// \param varindex Index of variable to which to bind
+    /// \param value Value of variable to bind
+    /// \exception SQLite3Error on an error
+    void bindInt(int stindex, int varindex, sqlite3_int64 value) {
+        if (sqlite3_bind_int64(accessor_->dbparameters_->getStatement(stindex),
+                             varindex, value) != SQLITE_OK) {
+            isc_throw(SQLite3Error, "Could not bind value to parameter " <<
+                      varindex << " in statement '" <<
+                      text_statements[stindex] << "': " <<
+                      sqlite3_errmsg(accessor_->dbparameters_->db_));
+        }
+    }
+
+    ///\brief Get Single Value
+    ///
+    /// Executes a prepared statement (which has parameters bound to it)
+    /// for which the result of a single value is expected.
+    ///
+    /// \param stindex Index of prepared statement in statement table.
+    ///
+    /// \return Value of SELECT.
+    ///
+    /// \exception TooMuchData Multiple rows returned when one expected
+    /// \exception TooLittleData Zero rows returned when one expected
+    /// \exception DataSourceError SQLite3-related error
+    int getSingleValue(StatementID stindex) {
+
+        // Get a pointer to the statement for brevity (does not transfer
+        // resources)
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+
+        // Execute the statement.  It should return just one result row.
+        int rc = sqlite3_step(stmt);
+        int result = -1;
+        if (rc == SQLITE_ROW) {
+
+            // Got some data, extract the value
+            result = sqlite3_column_int(stmt, 0);
+            rc = sqlite3_step(stmt);
+            if (rc == SQLITE_DONE) {
+
+                // All OK, exit with the value.
+                return (result);
+
+            } else if (rc == SQLITE_ROW) {
+                isc_throw(TooMuchData, "request to return one value from "
+                          "diffs table returned multiple values");
+            }
+        } else if (rc == SQLITE_DONE) {
+
+            // No data in the table.  A bare exception with no explanation is
+            // thrown, as it will be replaced by a more informative one by
+            // the caller.
+            isc_throw(TooLittleData, "");
+        }
+
+        // We get here on an error.
+        isc_throw(DataSourceError, "could not get data from diffs table: " <<
+                  sqlite3_errmsg(accessor_->dbparameters_->db_));
+
+        // Keep the compiler happy with a return value.
+        return (result);
+    }
+
+    /// \brief Find index
+    ///
+    /// Executes the prepared statement locating the high or low index in
+    /// the diffs table and returns that index.
+    ///
+    /// \param stmt_id Index of the prepared statement to execute
+    /// \param zone_id ID of the zone for which the index is being sought
+    /// \param serial Zone serial number for which an index is being sought.
+    /// \param diff Operation code selecting either record additions or deletions
+    ///
+    /// \return int ID of the row in the diffs table corresponding to the
+    ///         statement.
+    ///
+    /// \exception TooLittleData Internal error, no result returned when one
+    ///            was expected.
+    /// \exception NoSuchSerial Serial number not found.
+    /// \exception NoDiffsData No data for this zone found in diffs table
+    int findIndex(StatementID stindex, int zone_id, uint32_t serial, int diff) {
+
+        // Set up the statement
+        reset(stindex);
+        bindInt(stindex, 1, zone_id);
+        bindInt(stindex, 2, serial);
+        bindInt(stindex, 3, diff);
+
+        // Execute the statement
+        int result = -1;
+        try {
+            result = getSingleValue(stindex);
+
+        } catch (const TooLittleData&) {
+
+            // No data returned but the SQL query succeeded.  Only possibility
+            // is that there is no entry in the differences table for the given
+            // zone and version.
+            isc_throw(NoSuchSerial, "No entry in differences table for " <<
+                      " zone ID " << zone_id << ", serial number " << serial);
+        }
+
+        return (result);
+    }
+
+    /// \brief Copy Column to Output
+    ///
+    /// Copies the textual data in the result set to the specified column
+    /// in the output.
+    ///
+    /// \param stindex Index of prepared statement used to access data
+    /// \param data Array of columns passed to getNext
+    /// \param column Column of output to copy
+    void copyColumn(StatementID stindex, std::string (&data)[COLUMN_COUNT],
+                    int column) {
+
+        // Get a pointer to the statement for brevity (does not transfer
+        // resources)
+        sqlite3_stmt* stmt = accessor_->dbparameters_->getStatement(stindex);
+        data[column] = convertToPlainChar(sqlite3_column_text(stmt,
+                                                              column),
+                                          accessor_->dbparameters_->db_);
+    }
+
+    // Attributes
+
+    boost::shared_ptr<const SQLite3Accessor> accessor_; // Accessor object
+    int last_status_;           // Last status received from sqlite3_step
+};
+
+// ... and return the iterator
+
+DatabaseAccessor::IteratorContextPtr
+SQLite3Accessor::getDiffs(int id, uint32_t start, uint32_t end) const {
+    return (IteratorContextPtr(new DiffContext(shared_from_this(), id, start,
+                               end)));
+}
+
+
+
 pair<bool, int>
 SQLite3Accessor::startUpdateZone(const string& zone_name, const bool replace) {
     if (dbparameters_->updating_zone) {
diff --git a/src/lib/datasrc/sqlite3_accessor.h b/src/lib/datasrc/sqlite3_accessor.h
index 6b5369c..250b46a 100644
--- a/src/lib/datasrc/sqlite3_accessor.h
+++ b/src/lib/datasrc/sqlite3_accessor.h
@@ -17,6 +17,7 @@
 #define __DATASRC_SQLITE3_ACCESSOR_H
 
 #include <datasrc/database.h>
+#include <datasrc/data_source.h>
 
 #include <exceptions/exceptions.h>
 
@@ -40,12 +41,37 @@ namespace datasrc {
  * It might mean corrupt database file, invalid request or that something is
  * rotten in the library.
  */
-class SQLite3Error : public Exception {
+class SQLite3Error : public DataSourceError {
 public:
     SQLite3Error(const char* file, size_t line, const char* what) :
-        isc::Exception(file, line, what) {}
+        DataSourceError(file, line, what) {}
 };
 
+/**
+ * \brief Too Much Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * many rows.
+ */
+class TooMuchData : public DataSourceError {
+public:
+    TooMuchData(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+/**
+ * \brief Too Little Data
+ *
+ * Thrown if a query expecting a certain number of rows back returned too
+ * few rows (including none).
+ */
+class TooLittleData : public DataSourceError {
+public:
+    TooLittleData(const char* file, size_t line, const char* what) :
+        DataSourceError(file, line, what) {}
+};
+
+
 struct SQLite3Parameters;
 
 /**
@@ -128,6 +154,27 @@ public:
      */
     virtual IteratorContextPtr getAllRecords(int id) const;
 
+    /** \brief Creates an iterator context for a set of differences.
+     *
+     * Implements the getDiffs() method from DatabaseAccessor
+     *
+     * \exception NoSuchSerial if either of the versions do not exist in
+     *            the difference table.
+     * \exception SQLite3Error if there is an sqlite3 error when performing
+     *            the query
+     *
+     * \param id The ID of the zone, returned from getZone().
+     * \param start The SOA serial number of the version of the zone from
+     *        which the difference sequence should start.
+     * \param end The SOA serial number of the version of the zone at which
+     *        the difference sequence should end.
+     *
+     * \return Iterator containing difference records.
+     */
+    virtual IteratorContextPtr
+    getDiffs(int id, uint32_t start, uint32_t end) const;
+
+
     virtual std::pair<bool, int> startUpdateZone(const std::string& zone_name,
                                                  bool replace);
 
@@ -192,14 +239,20 @@ private:
     const std::string filename_;
     /// \brief The class for which the queries are done
     const std::string class_;
+    /// \brief Database name
+    const std::string database_name_;
+
     /// \brief Opens the database
     void open(const std::string& filename);
     /// \brief Closes the database
     void close();
-    /// \brief SQLite3 implementation of IteratorContext
+
+    /// \brief SQLite3 implementation of IteratorContext for all records
     class Context;
     friend class Context;
-    const std::string database_name_;
+    /// \brief SQLite3 implementation of IteratorContext for differences
+    class DiffContext;
+    friend class DiffContext;
 };
 
 /// \brief Creates an instance of the SQlite3 datasource client
diff --git a/src/lib/datasrc/tests/Makefile.am b/src/lib/datasrc/tests/Makefile.am
index 32f52b6..70f2999 100644
--- a/src/lib/datasrc/tests/Makefile.am
+++ b/src/lib/datasrc/tests/Makefile.am
@@ -109,3 +109,4 @@ EXTRA_DIST += testdata/test-root.sqlite3
 EXTRA_DIST += testdata/test.sqlite3
 EXTRA_DIST += testdata/test.sqlite3.nodiffs
 EXTRA_DIST += testdata/rwtest.sqlite3
+EXTRA_DIST += testdata/diffs.sqlite3
diff --git a/src/lib/datasrc/tests/client_unittest.cc b/src/lib/datasrc/tests/client_unittest.cc
index 5b2c91a..ade6fc7 100644
--- a/src/lib/datasrc/tests/client_unittest.cc
+++ b/src/lib/datasrc/tests/client_unittest.cc
@@ -32,7 +32,9 @@ public:
     virtual FindResult findZone(const isc::dns::Name&) const {
         return (FindResult(result::NOTFOUND, ZoneFinderPtr()));
     }
-    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool) const {
+    virtual ZoneUpdaterPtr getUpdater(const isc::dns::Name&, bool, bool)
+        const
+    {
         return (ZoneUpdaterPtr());
     }
 };
diff --git a/src/lib/datasrc/tests/database_unittest.cc b/src/lib/datasrc/tests/database_unittest.cc
index 1a471bf..007c634 100644
--- a/src/lib/datasrc/tests/database_unittest.cc
+++ b/src/lib/datasrc/tests/database_unittest.cc
@@ -13,6 +13,7 @@
 // PERFORMANCE OF THIS SOFTWARE.
 
 #include <boost/shared_ptr.hpp>
+#include <boost/lexical_cast.hpp>
 
 #include <gtest/gtest.h>
 
@@ -37,6 +38,7 @@ using namespace std;
 // for some systems.
 using boost::shared_ptr;
 using boost::dynamic_pointer_cast;
+using boost::lexical_cast;
 using namespace isc::dns;
 
 namespace {
@@ -255,6 +257,11 @@ public:
                   "This database datasource can't be iterated");
     }
 
+    virtual IteratorContextPtr getDiffs(int, uint32_t, uint32_t) const {
+        isc_throw(isc::NotImplemented,
+                  "This database datasource can't be iterated");
+    }
+
     virtual std::string findPreviousName(int, const std::string&) const {
         isc_throw(isc::NotImplemented,
                   "This data source doesn't support DNSSEC");
@@ -264,6 +271,52 @@ private:
 
 };
 
+/**
+ * Single journal entry in the mock database.
+ *
+ * All the members there are public for simplicity, as it only stores data.
+ * We use the implicit constructor and operator. The members can't be const
+ * because of the assignment operator (used in the vectors).
+ */
+struct JournalEntry {
+    JournalEntry(int id, uint32_t serial,
+                 DatabaseAccessor::DiffOperation operation,
+                 const std::string (&data)[DatabaseAccessor::DIFF_PARAM_COUNT])
+        : id_(id), serial_(serial), operation_(operation)
+    {
+        data_[DatabaseAccessor::DIFF_NAME] = data[DatabaseAccessor::DIFF_NAME];
+        data_[DatabaseAccessor::DIFF_TYPE] = data[DatabaseAccessor::DIFF_TYPE];
+        data_[DatabaseAccessor::DIFF_TTL] = data[DatabaseAccessor::DIFF_TTL];
+        data_[DatabaseAccessor::DIFF_RDATA] =
+            data[DatabaseAccessor::DIFF_RDATA];
+    }
+    JournalEntry(int id, uint32_t serial,
+                 DatabaseAccessor::DiffOperation operation,
+                 const std::string& name, const std::string& type,
+                 const std::string& ttl, const std::string& rdata):
+        id_(id), serial_(serial), operation_(operation)
+    {
+        data_[DatabaseAccessor::DIFF_NAME] = name;
+        data_[DatabaseAccessor::DIFF_TYPE] = type;
+        data_[DatabaseAccessor::DIFF_TTL] = ttl;
+        data_[DatabaseAccessor::DIFF_RDATA] = rdata;
+    }
+    int id_;
+    uint32_t serial_;
+    DatabaseAccessor::DiffOperation operation_;
+    std::string data_[DatabaseAccessor::DIFF_PARAM_COUNT];
+    bool operator==(const JournalEntry& other) const {
+        for (size_t i(0); i < DatabaseAccessor::DIFF_PARAM_COUNT; ++ i) {
+            if (data_[i] != other.data_[i]) {
+                return false;
+            }
+        }
+        // No need to check data here, checked above
+        return (id_ == other.id_ && serial_ == other.serial_ &&
+                operation_ == other.operation_);
+    }
+};
+
 /*
  * A virtual database accessor that pretends it contains single zone --
  * example.org.
@@ -288,6 +341,7 @@ public:
         readonly_records_ = &readonly_records_master_;
         update_records_ = &update_records_master_;
         empty_records_ = &empty_records_master_;
+        journal_entries_ = &journal_entries_master_;
         fillData();
     }
 
@@ -296,6 +350,7 @@ public:
         cloned_accessor->readonly_records_ = &readonly_records_master_;
         cloned_accessor->update_records_ = &update_records_master_;
         cloned_accessor->empty_records_ = &empty_records_master_;
+        cloned_accessor->journal_entries_ = &journal_entries_master_;
         latest_clone_ = cloned_accessor;
         return (cloned_accessor);
     }
@@ -544,7 +599,13 @@ public:
             *update_records_ = *readonly_records_;
         }
 
-        return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+        if (zone_name == "bad.example.org.") {
+            return (pair<bool, int>(true, -1));
+        } else if (zone_name == "null.example.org.") {
+            return (pair<bool, int>(true, 13));
+        } else {
+            return (pair<bool, int>(true, WRITABLE_ZONE_ID));
+        }
     }
     virtual void commit() {
         *readonly_records_ = *update_records_;
@@ -658,6 +719,30 @@ public:
             isc_throw(isc::Unexpected, "Unknown zone ID");
         }
     }
+    virtual void addRecordDiff(int id, uint32_t serial,
+                               DiffOperation operation,
+                               const std::string (&data)[DIFF_PARAM_COUNT])
+    {
+        if (id == 13) { // The null zone doesn't support journaling
+            isc_throw(isc::NotImplemented, "Test not implemented behaviour");
+        } else if (id == -1) { // Bad zone throws
+            isc_throw(DataSourceError, "Test error");
+        } else {
+            journal_entries_->push_back(JournalEntry(id, serial, operation,
+                                                    data));
+        }
+    }
+
+    // Check the journal is as expected and clear the journal
+    void checkJournal(const std::vector<JournalEntry> &expected) {
+        std::vector<JournalEntry> journal;
+        // Clean the journal, but keep local copy to check
+        journal.swap(*journal_entries_);
+        ASSERT_EQ(expected.size(), journal.size());
+        for (size_t i(0); i < expected.size(); ++ i) {
+            EXPECT_TRUE(expected[i] == journal[i]);
+        }
+    }
 
 private:
     // The following member variables are storage and/or update work space
@@ -677,6 +762,10 @@ private:
     const Domains empty_records_master_;
     const Domains* empty_records_;
 
+    // The journal data
+    std::vector<JournalEntry> journal_entries_master_;
+    std::vector<JournalEntry>* journal_entries_;
+
     // used as temporary storage after searchForRecord() and during
     // getNextRecord() calls, as well as during the building of the
     // fake data
@@ -794,6 +883,10 @@ public:
         rrset_.reset(new RRset(qname_, qclass_, qtype_, rrttl_));
         rrset_->addRdata(rdata::createRdata(rrset_->getType(),
                                             rrset_->getClass(), "192.0.2.2"));
+        soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+        soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                         "ns1.example.org. admin.example.org. "
+                                         "1234 3600 1800 2419200 7200"));
 
         // And its RRSIG.  Also different from the configured one.
         rrsigset_.reset(new RRset(qname_, qclass_, RRType::RRSIG(),
@@ -895,6 +988,7 @@ public:
     const RRTTL rrttl_;         // commonly used RR TTL
     RRsetPtr rrset_;            // for adding/deleting an RRset
     RRsetPtr rrsigset_;         // for adding/deleting an RRset
+    RRsetPtr soa_;              // for adding/deleting an RRset
 
     // update related objects to be tested
     ZoneUpdaterPtr updater_;
@@ -1246,8 +1340,8 @@ TEST_F(MockDatabaseClientTest, ttldiff) {
 
 // Unless we ask for individual RRs in our iterator request. In that case
 // every RR should go into its own 'rrset'
-TEST_F(MockDatabaseClientTest, ttldiff_no_adjust_ttl) {
-    ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), false));
+TEST_F(MockDatabaseClientTest, ttldiff_separate_rrs) {
+    ZoneIteratorPtr it(this->client_->getIterator(Name("example.org"), true));
 
     // Walk through the full iterator, we should see 1 rrset with name
     // ttldiff1.example.org., and two rdatas. Same for ttldiff2
@@ -2703,4 +2797,181 @@ TEST_F(MockDatabaseClientTest, badName) {
                  DataSourceError);
 }
 
+/*
+ * Test correct use of the updater with a journal.
+ */
+TEST_F(MockDatabaseClientTest, journal) {
+    updater_ = client_->getUpdater(zname_, false, true);
+    updater_->deleteRRset(*soa_);
+    updater_->deleteRRset(*rrset_);
+    soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+    soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                      "ns1.example.org. admin.example.org. "
+                                      "1235 3600 1800 2419200 7200"));
+    updater_->addRRset(*soa_);
+    updater_->addRRset(*rrset_);
+    ASSERT_NO_THROW(updater_->commit());
+    std::vector<JournalEntry> expected;
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                    DatabaseAccessor::DIFF_DELETE,
+                                    "example.org.", "SOA", "3600",
+                                    "ns1.example.org. admin.example.org. "
+                                    "1234 3600 1800 2419200 7200"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                    DatabaseAccessor::DIFF_DELETE,
+                                    "www.example.org.", "A", "3600",
+                                    "192.0.2.2"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+                                    DatabaseAccessor::DIFF_ADD,
+                                    "example.org.", "SOA", "3600",
+                                    "ns1.example.org. admin.example.org. "
+                                    "1235 3600 1800 2419200 7200"));
+    expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1235,
+                                    DatabaseAccessor::DIFF_ADD,
+                                    "www.example.org.", "A", "3600",
+                                    "192.0.2.2"));
+    current_accessor_->checkJournal(expected);
+}
+
+/*
+ * Push multiple delete-add sequences. Checks it is allowed and all is
+ * saved.
+ */
+TEST_F(MockDatabaseClientTest, journalMultiple) {
+    std::vector<JournalEntry> expected;
+    updater_ = client_->getUpdater(zname_, false, true);
+    std::string soa_rdata = "ns1.example.org. admin.example.org. "
+        "1234 3600 1800 2419200 7200";
+    for (size_t i(1); i < 100; ++ i) {
+        // Remove the old SOA
+        updater_->deleteRRset(*soa_);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i - 1,
+                                        DatabaseAccessor::DIFF_DELETE,
+                                        "example.org.", "SOA", "3600",
+                                        soa_rdata));
+        // Create a new SOA
+        soa_rdata = "ns1.example.org. admin.example.org. " +
+            lexical_cast<std::string>(1234 + i) + " 3600 1800 2419200 7200";
+        soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+        soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                          soa_rdata));
+        // Add the new SOA
+        updater_->addRRset(*soa_);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234 + i,
+                                        DatabaseAccessor::DIFF_ADD,
+                                        "example.org.", "SOA", "3600",
+                                        soa_rdata));
+    }
+    ASSERT_NO_THROW(updater_->commit());
+    // Check the journal contains everything.
+    current_accessor_->checkJournal(expected);
+}
+
+/*
+ * Test passing a forbidden sequence to it and expect it to throw.
+ *
+ * Note that we implicitly test in other testcases (those for add and
+ * delete) that if journaling is disabled, the order is not enforced.
+ */
+TEST_F(MockDatabaseClientTest, journalBadSequence) {
+    std::vector<JournalEntry> expected;
+    {
+        SCOPED_TRACE("Delete A before SOA");
+        updater_ = client_->getUpdater(zname_, false, true);
+        EXPECT_THROW(updater_->deleteRRset(*rrset_), isc::BadValue);
+        // Make sure the journal is empty now
+        current_accessor_->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add before delete");
+        updater_ = client_->getUpdater(zname_, false, true);
+        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        // Make sure the journal is empty now
+        current_accessor_->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add A before SOA");
+        updater_ = client_->getUpdater(zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        // But we miss the add SOA here
+        EXPECT_THROW(updater_->addRRset(*rrset_), isc::BadValue);
+        // Make sure the journal contains only the first one
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                        DatabaseAccessor::DIFF_DELETE,
+                                        "example.org.", "SOA", "3600",
+                                        "ns1.example.org. admin.example.org. "
+                                        "1234 3600 1800 2419200 7200"));
+        current_accessor_->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Commit before add");
+        updater_ = client_->getUpdater(zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        // Commit at the wrong time
+        EXPECT_THROW(updater_->commit(), isc::BadValue);
+        current_accessor_->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Delete two SOAs");
+        updater_ = client_->getUpdater(zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        // Delete the SOA again
+        EXPECT_THROW(updater_->deleteRRset(*soa_), isc::BadValue);
+        current_accessor_->checkJournal(expected);
+    }
+
+    {
+        SCOPED_TRACE("Add two SOAs");
+        updater_ = client_->getUpdater(zname_, false, true);
+        // So far OK
+        EXPECT_NO_THROW(updater_->deleteRRset(*soa_));
+        // Still OK
+        EXPECT_NO_THROW(updater_->addRRset(*soa_));
+        // But this one is added again
+        EXPECT_THROW(updater_->addRRset(*soa_), isc::BadValue);
+        expected.push_back(JournalEntry(WRITABLE_ZONE_ID, 1234,
+                                        DatabaseAccessor::DIFF_ADD,
+                                        "example.org.", "SOA", "3600",
+                                        "ns1.example.org. admin.example.org. "
+                                        "1234 3600 1800 2419200 7200"));
+        current_accessor_->checkJournal(expected);
+    }
+}
+
+/*
+ * Test that it refuses to store journal entries when journaling is
+ * requested together with erasing the whole zone.
+ */
+TEST_F(MockDatabaseClientTest, journalOnErase) {
+    EXPECT_THROW(client_->getUpdater(zname_, true, true), isc::BadValue);
+}
+
+/*
+ * Check that exception is propagated when the journal is not implemented.
+ */
+TEST_F(MockDatabaseClientTest, journalNotImplemented) {
+    updater_ = client_->getUpdater(Name("null.example.org"), false, true);
+    EXPECT_THROW(updater_->deleteRRset(*soa_), isc::NotImplemented);
+    soa_.reset(new RRset(zname_, qclass_, RRType::SOA(), rrttl_));
+    soa_->addRdata(rdata::createRdata(soa_->getType(), soa_->getClass(),
+                                      "ns1.example.org. admin.example.org. "
+                                      "1234 3600 1800 2419201 7200"));
+    EXPECT_THROW(updater_->addRRset(*soa_), isc::NotImplemented);
+}
+
+/*
+ * Test that different exceptions are propagated.
+ */
+TEST_F(MockDatabaseClientTest, journalException) {
+    updater_ = client_->getUpdater(Name("bad.example.org"), false, true);
+    EXPECT_THROW(updater_->deleteRRset(*soa_), DataSourceError);
+}
+
 }
diff --git a/src/lib/datasrc/tests/memory_datasrc_unittest.cc b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
index 2b854db..a1bd94e 100644
--- a/src/lib/datasrc/tests/memory_datasrc_unittest.cc
+++ b/src/lib/datasrc/tests/memory_datasrc_unittest.cc
@@ -177,6 +177,54 @@ TEST_F(InMemoryClientTest, iterator) {
     EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
 }
 
+TEST_F(InMemoryClientTest, iterator_separate_rrs) {
+    // Exactly the same tests as for iterator, but now with separate_rrs = true
+    // For the one that returns actual data, the AAAA should now be split up
+    boost::shared_ptr<InMemoryZoneFinder>
+        zone(new InMemoryZoneFinder(RRClass::IN(), Name("a")));
+    RRsetPtr aRRsetA(new RRset(Name("a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    aRRsetA->addRdata(rdata::in::A("192.0.2.1"));
+    RRsetPtr aRRsetAAAA(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::1"));
+    aRRsetAAAA->addRdata(rdata::in::AAAA("2001:db8::2"));
+    RRsetPtr aRRsetAAAA_r1(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA_r1->addRdata(rdata::in::AAAA("2001:db8::1"));
+    RRsetPtr aRRsetAAAA_r2(new RRset(Name("a"), RRClass::IN(), RRType::AAAA(),
+                                  RRTTL(300)));
+    aRRsetAAAA_r2->addRdata(rdata::in::AAAA("2001:db8::2"));
+
+    RRsetPtr subRRsetA(new RRset(Name("sub.x.a"), RRClass::IN(), RRType::A(),
+                                  RRTTL(300)));
+    subRRsetA->addRdata(rdata::in::A("192.0.2.2"));
+    EXPECT_EQ(result::SUCCESS, memory_client.addZone(zone));
+
+    // First, the zone is not there, so it should throw
+    EXPECT_THROW(memory_client.getIterator(Name("b"), true), DataSourceError);
+    // This zone is not there either, even when there's a zone containing this
+    EXPECT_THROW(memory_client.getIterator(Name("x.a")), DataSourceError);
+    // Now, an empty zone
+    ZoneIteratorPtr iterator(memory_client.getIterator(Name("a"), true));
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+    // It throws Unexpected when we are past the end
+    EXPECT_THROW(iterator->getNextRRset(), isc::Unexpected);
+
+    ASSERT_EQ(result::SUCCESS, zone->add(aRRsetA));
+    ASSERT_EQ(result::SUCCESS, zone->add(aRRsetAAAA));
+    ASSERT_EQ(result::SUCCESS, zone->add(subRRsetA));
+    // Check it with full zone, one by one.
+    // It should be in ascending order in case of InMemory data source
+    // (isn't guaranteed in general)
+    iterator = memory_client.getIterator(Name("a"), true);
+    EXPECT_EQ(aRRsetA->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(aRRsetAAAA_r1->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(aRRsetAAAA_r2->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(subRRsetA->toText(), iterator->getNextRRset()->toText());
+    EXPECT_EQ(ConstRRsetPtr(), iterator->getNextRRset());
+}
+
 TEST_F(InMemoryClientTest, getZoneCount) {
     EXPECT_EQ(0, memory_client.getZoneCount());
     memory_client.addZone(
diff --git a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
index 90b2ac1..61341f6 100644
--- a/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
+++ b/src/lib/datasrc/tests/sqlite3_accessor_unittest.cc
@@ -46,6 +46,7 @@ std::string SQLITE_DBNAME_EXAMPLE_ROOT = "sqlite3_test-root.sqlite3";
 std::string SQLITE_DBFILE_BROKENDB = TEST_DATA_DIR "/brokendb.sqlite3";
 std::string SQLITE_DBFILE_MEMORY = ":memory:";
 std::string SQLITE_DBFILE_EXAMPLE_ORG = TEST_DATA_DIR "/example.org.sqlite3";
+std::string SQLITE_DBFILE_DIFFS = TEST_DATA_DIR "/diffs.sqlite3";
 
 // The following file must be non existent and must be non"creatable";
 // the sqlite3 library will try to create a new DB file if it doesn't exist,
@@ -116,6 +117,26 @@ TEST_F(SQLite3AccessorTest, noClass) {
     EXPECT_FALSE(accessor->getZone("example.com.").first);
 }
 
+// Simple check to test that the sequence is valid.  It gets the next record
+// from the iterator, checks that it is not null, then checks the data.
+void checkRR(DatabaseAccessor::IteratorContextPtr& context,
+     std::string name, std::string ttl, std::string type, std::string rdata) {
+
+    // Mark where we are in the text
+    SCOPED_TRACE(name + " " + ttl + " " + type + " " + rdata);
+
+    std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+    // Get next record
+    EXPECT_TRUE(context->getNext(data));
+
+    // ... and check expected values
+    EXPECT_EQ(name, data[DatabaseAccessor::NAME_COLUMN]);
+    EXPECT_EQ(ttl, data[DatabaseAccessor::TTL_COLUMN]);
+    EXPECT_EQ(type, data[DatabaseAccessor::TYPE_COLUMN]);
+    EXPECT_EQ(rdata, data[DatabaseAccessor::RDATA_COLUMN]);
+}
+
 // This tests the iterator context
 TEST_F(SQLite3AccessorTest, iterator) {
     // Our test zone is conveniently small, but not empty
@@ -130,80 +151,138 @@ TEST_F(SQLite3AccessorTest, iterator) {
     ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context);
 
     std::string data[DatabaseAccessor::COLUMN_COUNT];
-    // Get and check the first and only record
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("MX", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("10 mail.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("ns1.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    checkRR(context, "example.org.", "3600", "MX", "10 mail.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns1.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns2.example.org.");
+    checkRR(context, "example.org.", "3600", "NS", "ns3.example.org.");
+    checkRR(context, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+    checkRR(context, "dname.example.org.", "3600", "DNAME",
+            "dname.example.info.");
+    checkRR(context, "dname2.foo.example.org.", "3600", "DNAME",
+            "dname2.example.info.");
+    checkRR(context, "mail.example.org.", "3600", "A", "192.0.2.10");
+    checkRR(context, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+    checkRR(context, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+    checkRR(context, "www.example.org.", "3600", "A", "192.0.2.1");
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("ns2.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Check there's no other
+    EXPECT_FALSE(context->getNext(data));
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("ns3.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // And make sure calling it again won't cause problems.
+    EXPECT_FALSE(context->getNext(data));
+}
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("SOA", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("ns1.example.org. admin.example.org. "
-              "1234 3600 1800 2419200 7200",
-              data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// This tests the difference iterator context
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("dname.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("dname.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+// Test that an attempt to create a difference iterator for a serial number
+// that does not exist throws an exception.
+TEST_F(SQLite3AccessorTest, diffIteratorNoRecords) {
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("DNAME", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("dname2.example.info.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("dname2.foo.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Our test zone is conveniently small, but not empty
+    initAccessor(SQLITE_DBFILE_DIFFS, "IN");
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("192.0.2.10", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("mail.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+    ASSERT_TRUE(zone_info.first);
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("NS", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Get the iterator context.  Difference of version 1 does not exist, so
+    // this should throw an exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second, 1, 1234),
+                 isc::datasrc::NoSuchSerial);
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("192.0.2.101", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("ns.sub.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Check that an invalid high version number also throws an exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second, 1231, 2234),
+                 NoSuchSerial);
 
-    EXPECT_TRUE(context->getNext(data));
-    EXPECT_EQ("A", data[DatabaseAccessor::TYPE_COLUMN]);
-    EXPECT_EQ("3600", data[DatabaseAccessor::TTL_COLUMN]);
-    EXPECT_EQ("192.0.2.1", data[DatabaseAccessor::RDATA_COLUMN]);
-    EXPECT_EQ("www.example.org.", data[DatabaseAccessor::NAME_COLUMN]);
+    // Check that valid versions - but for the wrong zone which does not hold
+    // any records - also throws this exception.
+    EXPECT_THROW(accessor->getDiffs(zone_info.second + 42, 1231, 1234),
+                 NoSuchSerial);
 
-    // Check there's no other
-    EXPECT_FALSE(context->getNext(data));
+}
 
-    // And make sure calling it again won't cause problems.
-    EXPECT_FALSE(context->getNext(data));
+// Try to iterate through a valid sets of differences
+TEST_F(SQLite3AccessorTest, diffIteratorSequences) {
+    std::string data[DatabaseAccessor::COLUMN_COUNT];
+
+    // Our test zone is conveniently small, but not empty
+    initAccessor(SQLITE_DBFILE_DIFFS, "IN");
+    const std::pair<bool, int> zone_info(accessor->getZone("example.org."));
+    ASSERT_TRUE(zone_info.first);
+
+
+    // Check the difference sequence 1230-1231 (two adjacent differences)
+    // Get the iterator context
+    DatabaseAccessor::IteratorContextPtr
+        context1(accessor->getDiffs(zone_info.second, 1230, 1231));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context1);
+
+    // Change: 1230-1231
+    checkRR(context1, "example.org.", "1800", "SOA",
+            "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+    checkRR(context1, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+    // Check there's no other and that calling it again after no records doesn't
+    // cause problems.
+    EXPECT_FALSE(context1->getNext(data));
+    EXPECT_FALSE(context1->getNext(data));
+
+
+    // Check that the difference sequence 1231-1233 (two separate difference
+    // sequences) is OK.
+    DatabaseAccessor::IteratorContextPtr
+        context2(accessor->getDiffs(zone_info.second, 1231, 1233));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+    // Change 1231-1232
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+    checkRR(context2, "unused.example.org.", "3600", "A", "192.0.2.102");
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+    // Change: 1232-1233
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+    checkRR(context2, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+    checkRR(context2, "sub.example.org.", "3600", "NS", "ns.sub.example.org.");
+    checkRR(context2, "ns.sub.example.org.", "3600", "A", "192.0.2.101");
+
+    // Check there's no other and that calling it again after no records doesn't
+    // cause problems.
+    EXPECT_FALSE(context2->getNext(data));
+    EXPECT_FALSE(context2->getNext(data));
+
+
+    // Check that the difference sequence 4294967280 to 1230 (serial number
+    // rollover) is OK
+    DatabaseAccessor::IteratorContextPtr
+        context3(accessor->getDiffs(zone_info.second, 4294967280U, 1230));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context3);
+
+    // Change 4294967280 to 1230.
+    checkRR(context3, "example.org.", "3600", "SOA",
+            "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+    checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.31");
+    checkRR(context3, "example.org.", "1800", "SOA",
+            "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+    checkRR(context3, "www.example.org.", "3600", "A", "192.0.2.21");
+
+    EXPECT_FALSE(context3->getNext(data));
+    EXPECT_FALSE(context3->getNext(data));
+
+
+    // Check the difference sequence 1233-1231 (versions in wrong order).  This
+    // should give an empty difference set.
+    DatabaseAccessor::IteratorContextPtr
+        context4(accessor->getDiffs(zone_info.second, 1233, 1231));
+    ASSERT_NE(DatabaseAccessor::IteratorContextPtr(), context2);
+
+    EXPECT_FALSE(context4->getNext(data));
+    EXPECT_FALSE(context4->getNext(data));
 }
 
 TEST(SQLite3Open, getDBNameExample2) {
diff --git a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 b/src/lib/datasrc/tests/testdata/brokendb.sqlite3
index 7aad3af..63f3cc5 100644
Binary files a/src/lib/datasrc/tests/testdata/brokendb.sqlite3 and b/src/lib/datasrc/tests/testdata/brokendb.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs.sqlite3 b/src/lib/datasrc/tests/testdata/diffs.sqlite3
new file mode 100644
index 0000000..3820563
Binary files /dev/null and b/src/lib/datasrc/tests/testdata/diffs.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/diffs_table.sql b/src/lib/datasrc/tests/testdata/diffs_table.sql
new file mode 100644
index 0000000..0e05207
--- /dev/null
+++ b/src/lib/datasrc/tests/testdata/diffs_table.sql
@@ -0,0 +1,123 @@
+-- Copyright (C) 2011  Internet Systems Consortium, Inc. ("ISC")
+--
+-- Permission to use, copy, modify, and/or distribute this software for any
+-- purpose with or without fee is hereby granted, provided that the above
+-- copyright notice and this permission notice appear in all copies.
+--
+-- THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+-- REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+-- AND FITNESS.  IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+-- INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+-- LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+-- OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+-- PERFORMANCE OF THIS SOFTWARE.
+
+-- \brief Create Differences Table
+--
+-- This is a short-term solution to creating the differences table for testing
+-- purposes.
+--
+-- It is assumed that the database used is a copy of the "example.org.sqlite3"
+-- database in this test directory.  The diffs table is created and populated
+-- with a set of RRs that purport to represent differences that end in the
+-- zone as is.
+--
+-- The file can be executed by the command:
+-- % sqlite3 -init <this-file> <database-file> ".quit"
+--
+-- The file gets executed as the set of SQL statements on the database file,
+-- the ".quit" on the command line then getting executed to exit SQLite3.
+
+-- Create the diffs table
+DROP TABLE diffs;
+CREATE TABLE diffs (id INTEGER PRIMARY KEY,
+                    zone_id INTEGER NOT NULL,
+                    version INTEGER NOT NULL,
+                    operation INTEGER NOT NULL,
+                    name STRING NOT NULL COLLATE NOCASE,
+                    rrtype STRING NOT NULL COLLATE NOCASE,
+                    ttl INTEGER NOT NULL,
+                    rdata STRING NOT NULL);
+
+-- Populate it.  A dummy zone_id is used for now - this will be updated last of
+-- all.
+
+-- Change from 4294967280 (0xfffffff0) to 1230 to show serial rollover
+-- Update one record in the zone.
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 4294967280,  1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 4294967280 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 4294967280, 1, "www.example.org.", "A", 3600, "192.0.2.31");
+
+-- Records added in version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 0, "example.org.", "SOA", 1800,
+           "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 0, "www.example.org.", "A", 3600, "192.0.2.21");
+
+-- Change 1230 to 1231: Change a parameter of the SOA record
+-- Records removed from version 1230 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1230, 1, "example.org.", "SOA", 1800,
+           "ns1.example.org. admin.example.org. 1230 3600 1800 2419200 7200");
+
+-- Records added in version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+
+
+-- Change 1231 to 1232: Remove one record, don't add anything.
+-- Records removed from version 1231 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1231 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1231, 1, "unused.example.org.", "A", 3600, "192.0.2.102");
+
+-- Records added in version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1232, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Change 1232 to 1233: Add two, don't remove anything.
+-- Records removed from version 1232 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1232, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1232 3600 1800 2419200 7200");
+
+-- Records added in version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "sub.example.org.", "NS", 3600, "ns.sub.example.org.");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 0, "ns.sub.example.org.", "A", 3600, "192.0.2.101");
+
+
+-- Change 1233 to 1234: change addresses of two A records
+-- Records removed from version 1233 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1233 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "www.example.org.", "A", 3600, "192.0.2.21");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1233, 1, "mail.example.org.", "A", 3600, "192.0.2.210");
+
+-- Records added in version 1234 of the zone
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "example.org.", "SOA", 3600,
+           "ns1.example.org. admin.example.org. 1234 3600 1800 2419200 7200");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "www.example.org.", "A", 3600, "192.0.2.1");
+INSERT INTO diffs(zone_id, version, operation, name, rrtype, ttl, rdata)
+    VALUES(1, 1234, 0, "mail.example.org.", "A", 3600, "192.0.2.10");
+
+-- Finally, update the zone_id in the diffs table with what is actually
+-- in the zone table.
+UPDATE diffs SET zone_id =
+   (SELECT id FROM ZONES LIMIT 1);
diff --git a/src/lib/datasrc/tests/testdata/example.org.sqlite3 b/src/lib/datasrc/tests/testdata/example.org.sqlite3
index 070012f..60e6e05 100644
Binary files a/src/lib/datasrc/tests/testdata/example.org.sqlite3 and b/src/lib/datasrc/tests/testdata/example.org.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 b/src/lib/datasrc/tests/testdata/example2.com.sqlite3
index 8d3bb34..9da7d0e 100644
Binary files a/src/lib/datasrc/tests/testdata/example2.com.sqlite3 and b/src/lib/datasrc/tests/testdata/example2.com.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 b/src/lib/datasrc/tests/testdata/rwtest.sqlite3
index ce95a1d..ccbb884 100644
Binary files a/src/lib/datasrc/tests/testdata/rwtest.sqlite3 and b/src/lib/datasrc/tests/testdata/rwtest.sqlite3 differ
diff --git a/src/lib/datasrc/tests/testdata/test-root.sqlite3 b/src/lib/datasrc/tests/testdata/test-root.sqlite3
index 7cc6195..c1dae47 100644
Binary files a/src/lib/datasrc/tests/testdata/test-root.sqlite3 and b/src/lib/datasrc/tests/testdata/test-root.sqlite3 differ
diff --git a/src/lib/datasrc/zone.h b/src/lib/datasrc/zone.h
index fa1c744..f824636 100644
--- a/src/lib/datasrc/zone.h
+++ b/src/lib/datasrc/zone.h
@@ -438,6 +438,10 @@ public:
     /// calls after \c commit() the implementation must throw a
     /// \c DataSourceError exception.
     ///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to add the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+    ///
     /// \todo As noted above we may have to revisit the design details as we
     /// gain experiences:
     ///
@@ -454,6 +458,8 @@ public:
     ///
     /// \exception DataSourceError Called after \c commit(), RRset is invalid
     /// (see above), internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the current RRset
+    ///   doesn't fit into the IXFR sequence (see above).
     /// \exception std::bad_alloc Resource allocation failure
     ///
     /// \param rrset The RRset to be added
@@ -503,6 +509,10 @@ public:
     /// calls after \c commit() the implementation must throw a
     /// \c DataSourceError exception.
     ///
+    /// If journaling was requested when getting this updater, it will refuse
+    /// to delete the RRset if the sequence doesn't look like an IXFR (see
+    /// DataSourceClient::getUpdater). In such a case isc::BadValue is thrown.
+    ///
     /// \todo As noted above we may have to revisit the design details as we
     /// gain experiences:
     ///
@@ -520,6 +530,8 @@ public:
     ///
     /// \exception DataSourceError Called after \c commit(), RRset is invalid
     /// (see above), internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the current RRset
+    ///   doesn't fit into the IXFR sequence (see above).
     /// \exception std::bad_alloc Resource allocation failure
     ///
     /// \param rrset The RRset to be deleted
@@ -540,6 +552,8 @@ public:
     ///
     /// \exception DataSourceError Duplicate call of the method,
     /// internal data source error
+    /// \exception isc::BadValue Journaling is enabled and the update is not
+    ///    a complete IXFR sequence.
     virtual void commit() = 0;
 };
 
diff --git a/src/lib/dns/rdata/generic/soa_6.cc b/src/lib/dns/rdata/generic/soa_6.cc
index 7ecd84f..875a957 100644
--- a/src/lib/dns/rdata/generic/soa_6.cc
+++ b/src/lib/dns/rdata/generic/soa_6.cc
@@ -106,6 +106,12 @@ SOA::toWire(AbstractMessageRenderer& renderer) const {
     renderer.writeData(numdata_, sizeof(numdata_));
 }
 
+uint32_t
+SOA::getSerial() const {
+    InputBuffer b(numdata_, sizeof(numdata_));
+    return (b.readUint32());
+}
+
 string
 SOA::toText() const {
     InputBuffer b(numdata_, sizeof(numdata_));
diff --git a/src/lib/dns/rdata/generic/soa_6.h b/src/lib/dns/rdata/generic/soa_6.h
index 3f6185e..4c6b6ec 100644
--- a/src/lib/dns/rdata/generic/soa_6.h
+++ b/src/lib/dns/rdata/generic/soa_6.h
@@ -34,6 +34,8 @@ public:
     SOA(const Name& mname, const Name& rname, uint32_t serial,
         uint32_t refresh, uint32_t retry, uint32_t expire,
         uint32_t minimum);
+    /// \brief Returns the serial stored in the SOA.
+    uint32_t getSerial() const;
 private:
     /// Note: this is a prototype version; we may reconsider
     /// this representation later.
diff --git a/src/lib/dns/tests/rdata_soa_unittest.cc b/src/lib/dns/tests/rdata_soa_unittest.cc
index 63fe1f7..17498eb 100644
--- a/src/lib/dns/tests/rdata_soa_unittest.cc
+++ b/src/lib/dns/tests/rdata_soa_unittest.cc
@@ -74,4 +74,9 @@ TEST_F(Rdata_SOA_Test, toText) {
     EXPECT_EQ("ns.example.com. root.example.com. "
               "2010012601 3600 300 3600000 1200", rdata_soa.toText());
 }
+
+TEST_F(Rdata_SOA_Test, getSerial) {
+    EXPECT_EQ(2010012601, rdata_soa.getSerial());
+}
+
 }
diff --git a/src/lib/python/isc/bind10/component.py b/src/lib/python/isc/bind10/component.py
index 68dd13f..5df102e 100644
--- a/src/lib/python/isc/bind10/component.py
+++ b/src/lib/python/isc/bind10/component.py
@@ -187,7 +187,7 @@ class BaseComponent:
 
         The exit code is used for logging. It might be None.
 
-        It calles _failed_internal internally.
+        It calls _failed_internal internally.
         """
         logger.error(BIND10_COMPONENT_FAILED, self.name(), self.pid(),
                      exit_code if exit_code is not None else "unknown")
diff --git a/src/lib/python/isc/bind10/special_component.py b/src/lib/python/isc/bind10/special_component.py
index 77a5dcb..9972200 100644
--- a/src/lib/python/isc/bind10/special_component.py
+++ b/src/lib/python/isc/bind10/special_component.py
@@ -113,6 +113,11 @@ class XfrIn(Component):
         Component.__init__(self, process, boss, kind, 'Xfrin', None,
                            boss.start_xfrin)
 
+class XfrOut(Component):
+    def __init__(self, process, boss, kind, address=None, params=None):
+        Component.__init__(self, process, boss, kind, 'Xfrout', None,
+                           boss.start_xfrout)
+
 class SetUID(BaseComponent):
     """
     This is a pseudo-component which drops root privileges when started
@@ -154,6 +159,7 @@ def get_specials():
         'cmdctl': CmdCtl,
         # FIXME: Temporary workaround before #1292 is done
         'xfrin': XfrIn,
+        'xfrout': XfrOut,
         # TODO: Remove when not needed, workaround before sockcreator works
         'setuid': SetUID
     }
diff --git a/src/lib/python/isc/datasrc/client_inc.cc b/src/lib/python/isc/datasrc/client_inc.cc
index 6465bf3..f4fb01b 100644
--- a/src/lib/python/isc/datasrc/client_inc.cc
+++ b/src/lib/python/isc/datasrc/client_inc.cc
@@ -89,7 +89,7 @@ None\n\
 ";
 
 const char* const DataSourceClient_getIterator_doc = "\
-get_iterator(name, adjust_ttl=True) -> ZoneIterator\n\
+get_iterator(name, separate_rrs=False) -> ZoneIterator\n\
 \n\
 Returns an iterator to the given zone.\n\
 \n\
@@ -111,17 +111,18 @@ anything else.\n\
 Parameters:\n\
   isc.dns.Name The name of zone apex to be traversed. It doesn't do\n\
                nearest match as find_zone.\n\
-  adjust_ttl   If True, the iterator will treat RRs with the same\n\
-               name and type but different TTL values to be of the\n\
-               same RRset, and will adjust the TTL to the lowest\n\
-               value found. If false, it will consider the RR to\n\
-               belong to a different RRset.\n\
+  separate_rrs If true, the iterator will return each RR as a\n\
+               new RRset object. If false, the iterator will\n\
+               combine consecutive RRs with the same name and type\n\
+               into 1 RRset. The capitalization of the RRset will\n\
+               be that of the first RR read, and TTLs will be\n\
+               adjusted to the lowest one found.\n\
 \n\
 Return Value(s): Pointer to the iterator.\n\
 ";
 
 const char* const DataSourceClient_getUpdater_doc = "\
-get_updater(name, replace) -> ZoneUpdater\n\
+get_updater(name, replace, journaling=False) -> ZoneUpdater\n\
 \n\
 Return an updater to make updates to a specific zone.\n\
 \n\
@@ -162,6 +163,22 @@ A data source can be \"read only\" or can prohibit partial updates. In\n\
 such cases this method will result in an isc.datasrc.NotImplemented exception\n\
 unconditionally or when replace is false).\n\
 \n\
+If journaling is True, the data source should store a journal of\n\
+changes. These can be used later on by, for example, IXFR-out.\n\
+However, the parameter is a hint only. It might be unable to store\n\
+them and they would be silently discarded. Or it might need to store\n\
+them no matter what (for example a git-based data source would store\n\
+journal implicitly). When the journaling is True, it requires that the\n\
+following update be formatted as an IXFR transfer (SOA to be removed,\n\
+bunch of RRs to be removed, SOA to be added, bunch of RRs to be added,\n\
+and possibly repeated). However, it is not required that the updater\n\
+checks that. If it is False, it must not require so and must accept\n\
+any order of changes.\n\
+\n\
+We don't support erasing the whole zone (by replace being True) and\n\
+saving a journal at the same time. In such a situation, isc.datasrc.Error\n\
+is thrown.\n\
+\n\
 Exceptions:\n\
   isc.datasrc. NotImplemented The underlying data source does not support\n\
                updates.\n\
@@ -170,6 +187,7 @@ Exceptions:\n\
 Parameters:\n\
   name       The zone name to be updated\n\
   replace    Whether to delete existing RRs before making updates\n\
+  journaling The zone updater should store a journal of the changes.\n\
 \n\
 ";
 } // unnamed namespace
diff --git a/src/lib/python/isc/datasrc/client_python.cc b/src/lib/python/isc/datasrc/client_python.cc
index 49235a6..2740355 100644
--- a/src/lib/python/isc/datasrc/client_python.cc
+++ b/src/lib/python/isc/datasrc/client_python.cc
@@ -84,26 +84,26 @@ PyObject*
 DataSourceClient_getIterator(PyObject* po_self, PyObject* args) {
     s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
     PyObject* name_obj;
-    PyObject* adjust_ttl_obj = NULL;
+    PyObject* separate_rrs_obj = NULL;
     if (PyArg_ParseTuple(args, "O!|O", &name_type, &name_obj,
-                         &adjust_ttl_obj)) {
+                         &separate_rrs_obj)) {
         try {
-            bool adjust_ttl = true;
-            if (adjust_ttl_obj != NULL) {
+            bool separate_rrs = false;
+            if (separate_rrs_obj != NULL) {
                 // store result in local var so we can explicitely check for
                 // -1 error return value
-                int adjust_ttl_no = PyObject_Not(adjust_ttl_obj);
-                if (adjust_ttl_no == 1) {
-                    adjust_ttl = false;
-                } else if (adjust_ttl_no == -1) {
+                int separate_rrs_true = PyObject_IsTrue(separate_rrs_obj);
+                if (separate_rrs_true == 1) {
+                    separate_rrs = true;
+                } else if (separate_rrs_true == -1) {
                     PyErr_SetString(getDataSourceException("Error"),
-                                    "Error getting value of adjust_ttl");
+                                    "Error getting value of separate_rrs");
                     return (NULL);
                 }
             }
             return (createZoneIteratorObject(
                 self->cppobj->getInstance().getIterator(PyName_ToName(name_obj),
-                                                        adjust_ttl),
+                                                        separate_rrs),
                 po_self));
         } catch (const isc::NotImplemented& ne) {
             PyErr_SetString(getDataSourceException("NotImplemented"),
@@ -129,14 +129,17 @@ PyObject*
 DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
     s_DataSourceClient* const self = static_cast<s_DataSourceClient*>(po_self);
     PyObject *name_obj;
-    PyObject *replace_obj;
-    if (PyArg_ParseTuple(args, "O!O", &name_type, &name_obj, &replace_obj) &&
-        PyBool_Check(replace_obj)) {
-        bool replace = (replace_obj != Py_False);
+    PyObject *replace_obj = NULL;
+    PyObject *journaling_obj = Py_False;
+    if (PyArg_ParseTuple(args, "O!O|O", &name_type, &name_obj,
+                         &replace_obj, &journaling_obj) &&
+        PyBool_Check(replace_obj) && PyBool_Check(journaling_obj)) {
+        const bool replace = (replace_obj != Py_False);
+        const bool journaling = (journaling_obj == Py_True);
         try {
             ZoneUpdaterPtr updater =
                 self->cppobj->getInstance().getUpdater(PyName_ToName(name_obj),
-                                                       replace);
+                                                       replace, journaling);
             if (!updater) {
                 return (Py_None);
             }
@@ -157,6 +160,15 @@ DataSourceClient_getUpdater(PyObject* po_self, PyObject* args) {
             return (NULL);
         }
     } else {
+        // PyBool_Check doesn't set the error, so we have to set it ourselves.
+        if (replace_obj != NULL && !PyBool_Check(replace_obj)) {
+            PyErr_SetString(PyExc_TypeError, "'replace' for "
+                            "DataSourceClient.get_updater must be boolean");
+        }
+        if (!PyBool_Check(journaling_obj)) {
+            PyErr_SetString(PyExc_TypeError, "'journaling' for "
+                            "DataSourceClient.get_updater must be boolean");
+        }
         return (NULL);
     }
 }
diff --git a/src/lib/python/isc/datasrc/tests/datasrc_test.py b/src/lib/python/isc/datasrc/tests/datasrc_test.py
index 68e075a..02020e2 100644
--- a/src/lib/python/isc/datasrc/tests/datasrc_test.py
+++ b/src/lib/python/isc/datasrc/tests/datasrc_test.py
@@ -16,8 +16,9 @@
 import isc.log
 import isc.datasrc
 from isc.datasrc import ZoneFinder
-import isc.dns
+from isc.dns import *
 import unittest
+import sqlite3
 import os
 import shutil
 import sys
@@ -82,13 +83,12 @@ class DataSrcClient(unittest.TestCase):
                           isc.datasrc.DataSourceClient, "memory",
                           "{ \"foo\": 1 }")
 
-    @unittest.skip("This test may fail depending on sqlite3 library behavior")
     def test_iterate(self):
         dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
 
         # for RRSIGS, the TTL's are currently modified. This test should
         # start failing when we fix that.
-        rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), False)
+        rrs = dsc.get_iterator(isc.dns.Name("sql1.example.com."), True)
 
         # we do not know the order in which they are returned by the iterator
         # but we do want to check them, so we put all records into one list
@@ -115,7 +115,11 @@ class DataSrcClient(unittest.TestCase):
                      "256 3 5 AwEAAdYdRhBAEY67R/8G1N5AjGF6asIiNh/pNGeQ8xDQP13J"+
                      "N2lo+sNqWcmpYNhuVqRbLB+mamsU1XcCICSBvAlSmfz/ZUdafX23knAr"+
                      "TlALxMmspcfdpqun3Yr3YYnztuj06rV7RqmveYckWvAUXVYMSMQZfJ30"+
-                     "5fs0dE/xLztL/CzZ",
+                     "5fs0dE/xLztL/CzZ"
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.DNSKEY(), isc.dns.RRTTL(3600),
+                  [
                      "257 3 5 AwEAAbaKDSa9XEFTsjSYpUTHRotTS9Tz3krfDucugW5UokGQ"+
                      "KC26QlyHXlPTZkC+aRFUs/dicJX2kopndLcnlNAPWiKnKtrsFSCnIJDB"+
                      "ZIyvcKq+9RXmV3HK3bUdHnQZ88IZWBRmWKfZ6wnzHo53kdYKAemTErkz"+
@@ -127,8 +131,16 @@ class DataSrcClient(unittest.TestCase):
         add_rrset(expected_rrset_list, name, rrclass,
                   isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
                   [
-                    "dns01.example.com.",
-                    "dns02.example.com.",
+                    "dns01.example.com."
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+                  [
+                    "dns02.example.com."
+                  ])
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.NS(), isc.dns.RRTTL(3600),
+                  [
                     "dns03.example.com."
                   ])
         add_rrset(expected_rrset_list, name, rrclass,
@@ -139,15 +151,19 @@ class DataSrcClient(unittest.TestCase):
         # For RRSIGS, we can't add the fake data through the API, so we
         # simply pass no rdata at all (which is skipped by the check later)
         
-        # Since we passed adjust_ttl = False to get_iterator, we get several
+        # Since we passed separate_rrs = True to get_iterator, we get several
         # sets of RRSIGs, one for each TTL
         add_rrset(expected_rrset_list, name, rrclass,
                   isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
         add_rrset(expected_rrset_list, name, rrclass,
-                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
         add_rrset(expected_rrset_list, name, rrclass,
                   isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
         add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(3600), None)
+        add_rrset(expected_rrset_list, name, rrclass,
+                  isc.dns.RRType.RRSIG(), isc.dns.RRTTL(7200), None)
+        add_rrset(expected_rrset_list, name, rrclass,
                   isc.dns.RRType.SOA(), isc.dns.RRTTL(3600),
                   [
                      "master.example.com. admin.example.com. 678 3600 1800 2419200 7200"
@@ -191,26 +207,26 @@ class DataSrcClient(unittest.TestCase):
         # instead of failing?
         self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
 
-        # Without the adjust_ttl argument, it should return 55 RRsets
+        # Without the separate_rrs argument, it should return 55 RRsets
         dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
         rrets = dsc.get_iterator(isc.dns.Name("example.com"))
         # there are more than 80 RRs in this zone... let's just count them
         # (already did a full check of the smaller zone above)
         self.assertEqual(55, len(list(rrets)))
 
-        # same test, but now with explicit True argument for adjust_ttl
+        # same test, but now with explicit False argument for separate_rrs
         dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
-        rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
+        rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
         # there are more than 80 RRs in this zone... let's just count them
         # (already did a full check of the smaller zone above)
         self.assertEqual(55, len(list(rrets)))
 
         # Count should be 71 if we request individual rrsets for differing ttls
         dsc = isc.datasrc.DataSourceClient("sqlite3", READ_ZONE_DB_CONFIG)
-        rrets = dsc.get_iterator(isc.dns.Name("example.com"), False)
+        rrets = dsc.get_iterator(isc.dns.Name("example.com"), True)
         # there are more than 80 RRs in this zone... let's just count them
         # (already did a full check of the smaller zone above)
-        self.assertEqual(71, len(list(rrets)))
+        self.assertEqual(84, len(list(rrets)))
         # TODO should we catch this (iterating past end) and just return None
         # instead of failing?
         self.assertRaises(isc.datasrc.Error, rrs.get_next_rrset)
@@ -565,6 +581,125 @@ class DataSrcUpdater(unittest.TestCase):
         self.assertEqual(None, iterator.get_soa())
         self.assertEqual(None, iterator.get_next_rrset())
 
+class JournalWrite(unittest.TestCase):
+    def setUp(self):
+        # Make a fresh copy of the writable database with all original content
+        shutil.copyfile(READ_ZONE_DB_FILE, WRITE_ZONE_DB_FILE)
+        self.dsc = isc.datasrc.DataSourceClient("sqlite3",
+                                                WRITE_ZONE_DB_CONFIG)
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+
+    def tearDown(self):
+        self.dsc = None
+        self.updater = None
+
+    def check_journal(self, expected_list):
+        # This assumes sqlite3 DB and directly fetches stored data from
+        # the DB file.  It should be generalized using ZoneJournalReader
+        # once it's supported.
+        conn = sqlite3.connect(WRITE_ZONE_DB_FILE)
+        cur = conn.cursor()
+        cur.execute('SELECT name, rrtype, ttl, rdata FROM diffs ORDER BY id')
+        actual_list = cur.fetchall()
+        self.assertEqual(len(expected_list), len(actual_list))
+        for (expected, actual) in zip(expected_list, actual_list):
+            self.assertEqual(expected, actual)
+        conn.close()
+
+    def create_soa(self, serial):
+        soa = RRset(Name('example.org'), RRClass.IN(), RRType.SOA(),
+                    RRTTL(3600))
+        soa.add_rdata(Rdata(RRType.SOA(), RRClass.IN(),
+                            'ns1.example.org. admin.example.org. ' +
+                            str(serial) + ' 3600 1800 2419200 7200'))
+        return soa
+
+    def create_a(self, address):
+        a_rr = RRset(Name('www.example.org'), RRClass.IN(), RRType.A(),
+                     RRTTL(3600))
+        a_rr.add_rdata(Rdata(RRType.A(), RRClass.IN(), address))
+        return (a_rr)
+
+    def test_journal_write(self):
+        # This is a straightforward port of the C++ 'journal' test
+        # Note: we add/delete 'out of zone' data (example.org in the
+        # example.com zone) for convenience.
+        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.delete_rrset(self.create_a('192.0.2.2'))
+        self.updater.add_rrset(self.create_soa(1235))
+        self.updater.add_rrset(self.create_a('192.0.2.2'))
+        self.updater.commit()
+
+        expected = []
+        expected.append(("example.org.", "SOA", 3600,
+                         "ns1.example.org. admin.example.org. " +
+                         "1234 3600 1800 2419200 7200"))
+        expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+        expected.append(("example.org.", "SOA", 3600,
+                         "ns1.example.org. admin.example.org. " +
+                         "1235 3600 1800 2419200 7200"))
+        expected.append(("www.example.org.", "A", 3600, "192.0.2.2"))
+        self.check_journal(expected)
+
+    def test_journal_write_multiple(self):
+        # This is a straightforward port of the C++ 'journalMultiple' test
+        expected = []
+        for i in range(1, 100):
+            self.updater.delete_rrset(self.create_soa(1234 + i - 1))
+            expected.append(("example.org.", "SOA", 3600,
+                             "ns1.example.org. admin.example.org. " +
+                             str(1234 + i - 1) + " 3600 1800 2419200 7200"))
+            self.updater.add_rrset(self.create_soa(1234 + i))
+            expected.append(("example.org.", "SOA", 3600,
+                             "ns1.example.org. admin.example.org. " +
+                             str(1234 + i) + " 3600 1800 2419200 7200"))
+        self.updater.commit()
+        self.check_journal(expected)
+
+    def test_journal_write_bad_sequence(self):
+        # This is a straightforward port of the C++ 'journalBadSequence' test
+
+        # Delete A before SOA
+        self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+                          self.create_a('192.0.2.1'))
+        # Add before delete
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          self.create_soa(1234))
+        # Add A before SOA
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(self.create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          self.create_a('192.0.2.1'))
+        # Commit before add
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(self.create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.commit)
+        # Delete two SOAs
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(self.create_soa(1234))
+        self.assertRaises(isc.datasrc.Error, self.updater.delete_rrset,
+                          self.create_soa(1235))
+        # Add two SOAs
+        self.updater = self.dsc.get_updater(Name("example.com"), False, True)
+        self.updater.delete_rrset(self.create_soa(1234))
+        self.updater.add_rrset(self.create_soa(1235))
+        self.assertRaises(isc.datasrc.Error, self.updater.add_rrset,
+                          self.create_soa(1236))
+
+    def test_journal_write_onerase(self):
+        self.updater = None
+        self.assertRaises(isc.datasrc.Error, self.dsc.get_updater,
+                          Name("example.com"), True, True)
+
+    def test_journal_write_badparam(self):
+        dsc = isc.datasrc.DataSourceClient("sqlite3", WRITE_ZONE_DB_CONFIG)
+        self.assertRaises(TypeError, dsc.get_updater, 0, False, True)
+        self.assertRaises(TypeError, dsc.get_updater, Name('example.com'),
+                          False, 0)
+        self.assertRaises(TypeError, dsc.get_updater, Name("example.com"),
+                          1, True)
+
 if __name__ == "__main__":
     isc.log.init("bind10")
     isc.log.resetUnitTestRootLogger()
diff --git a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3
index cc8cfc3..521cf31 100644
Binary files a/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 and b/src/lib/python/isc/datasrc/tests/testdata/example.com.sqlite3 differ
diff --git a/src/lib/python/isc/notify/notify_out.py b/src/lib/python/isc/notify/notify_out.py
index 6b91c87..af79b7c 100644
--- a/src/lib/python/isc/notify/notify_out.py
+++ b/src/lib/python/isc/notify/notify_out.py
@@ -21,6 +21,7 @@ import threading
 import time
 import errno
 from isc.datasrc import sqlite3_ds
+from isc.datasrc import DataSourceClient
 from isc.net import addr
 import isc
 from isc.log_messages.notify_out_messages import *
@@ -31,7 +32,7 @@ logger = isc.log.Logger("notify_out")
 # we can't import we should not start anyway, and logging an error
 # is a bad idea since the logging system is most likely not
 # initialized yet. see trac ticket #1103
-from pydnspp import *
+from isc.dns import *
 
 ZONE_NEW_DATA_READY_CMD = 'zone_new_data_ready'
 _MAX_NOTIFY_NUM = 30
@@ -51,6 +52,24 @@ _BAD_REPLY_PACKET = 5
 
 SOCK_DATA = b's'
 
+# borrowed from xfrin.py @ #1298.  We should eventually unify it.
+def format_zone_str(zone_name, zone_class):
+    """Helper function to format a zone name and class as a string of
+       the form '<name>/<class>'.
+       Parameters:
+       zone_name (isc.dns.Name) name to format
+       zone_class (isc.dns.RRClass) class to format
+    """
+    return zone_name.to_text() + '/' + str(zone_class)
+
+class NotifyOutDataSourceError(Exception):
+    """An exception raised when data source error happens within notify out.
+
+    This exception is expected to be caught within the notify_out module.
+
+    """
+    pass
+
 class ZoneNotifyInfo:
     '''This class keeps track of notify-out information for one zone.'''
 
@@ -123,16 +142,20 @@ class NotifyOut:
         self._nonblock_event = threading.Event()
 
     def _init_notify_out(self, datasrc_file):
-        '''Get all the zones name and its notify target's address
+        '''Get all the zone names and their notify targets' addresses.
+
         TODO, currently the zones are got by going through the zone
         table in database. There should be a better way to get them
         and also the setting 'also_notify', and there should be one
-        mechanism to cover the changed datasrc.'''
+        mechanism to cover the changed datasrc.
+
+        '''
         self._db_file = datasrc_file
         for zone_name, zone_class in sqlite3_ds.get_zones_info(datasrc_file):
             zone_id = (zone_name, zone_class)
             self._notify_infos[zone_id] = ZoneNotifyInfo(zone_name, zone_class)
-            slaves = self._get_notify_slaves_from_ns(zone_name)
+            slaves = self._get_notify_slaves_from_ns(Name(zone_name),
+                                                     RRClass(zone_class))
             for item in slaves:
                 self._notify_infos[zone_id].notify_slaves.append((item, 53))
 
@@ -234,7 +257,7 @@ class NotifyOut:
     def _get_rdata_data(self, rr):
         return rr[7].strip()
 
-    def _get_notify_slaves_from_ns(self, zone_name):
+    def _get_notify_slaves_from_ns(self, zone_name, zone_class):
         '''Get all NS records, then remove the primary master from ns rrset,
         then use the name in NS record rdata part to get the a/aaaa records
         in the same zone. the targets listed in a/aaaa record rdata are treated
@@ -242,28 +265,56 @@ class NotifyOut:
         Note: this is the simplest way to get the address of slaves,
         but not correct, it can't handle the delegation slaves, or the CNAME
         and DNAME logic.
-        TODO. the function should be provided by one library.'''
-        ns_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'NS', self._db_file)
-        soa_rrset = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
-        ns_rr_name = []
-        for ns in ns_rrset:
-            ns_rr_name.append(self._get_rdata_data(ns))
-
-        if len(soa_rrset) > 0:
-            sname = (soa_rrset[0][sqlite3_ds.RR_RDATA_INDEX].split(' '))[0].strip() #TODO, bad hardcode to get rdata part
-            if sname in ns_rr_name:
-                ns_rr_name.remove(sname)
-
-        addr_list = []
-        for rr_name in ns_rr_name:
-            a_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'A', self._db_file)
-            aaaa_rrset = sqlite3_ds.get_zone_rrset(zone_name, rr_name, 'AAAA', self._db_file)
-            for rr in a_rrset:
-                addr_list.append(self._get_rdata_data(rr))
-            for rr in aaaa_rrset:
-                addr_list.append(self._get_rdata_data(rr))
-
-        return addr_list
+        TODO. the function should be provided by one library.
+
+        '''
+        # Prepare data source client.  This should eventually be moved to
+        # an earlier stage of initialization and also support multiple
+        # data sources.
+        datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+        try:
+            result, finder = DataSourceClient('sqlite3',
+                                              datasrc_config).find_zone(
+                zone_name)
+        except isc.datasrc.Error as ex:
+            logger.error(NOTIFY_OUT_DATASRC_ACCESS_FAILURE, ex)
+            return []
+        if result is not DataSourceClient.SUCCESS:
+            logger.error(NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND,
+                         format_zone_str(zone_name, zone_class))
+            return []
+
+        result, ns_rrset = finder.find(zone_name, RRType.NS(), None,
+                                       finder.FIND_DEFAULT)
+        if result is not finder.SUCCESS or ns_rrset is None:
+            logger.warn(NOTIFY_OUT_ZONE_NO_NS,
+                        format_zone_str(zone_name, zone_class))
+            return []
+        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+                                        finder.FIND_DEFAULT)
+        if result is not finder.SUCCESS or soa_rrset is None or \
+                soa_rrset.get_rdata_count() != 1:
+            logger.warn(NOTIFY_OUT_ZONE_BAD_SOA,
+                        format_zone_str(zone_name, zone_class))
+            return []           # broken zone anyway, stop here.
+        soa_mname = Name(soa_rrset.get_rdata()[0].to_text().split(' ')[0])
+
+        addrs = []
+        for ns_rdata in ns_rrset.get_rdata():
+            ns_name = Name(ns_rdata.to_text())
+            if soa_mname == ns_name:
+                continue
+            result, rrset = finder.find(ns_name, RRType.A(), None,
+                                        finder.FIND_DEFAULT)
+            if result is finder.SUCCESS and rrset is not None:
+                addrs.extend([a.to_text() for a in rrset.get_rdata()])
+
+            result, rrset = finder.find(ns_name, RRType.AAAA(), None,
+                                        finder.FIND_DEFAULT)
+            if result is finder.SUCCESS and rrset is not None:
+                addrs.extend([aaaa.to_text() for aaaa in rrset.get_rdata()])
+
+        return addrs
 
     def _prepare_select_info(self):
         '''
@@ -404,8 +455,9 @@ class NotifyOut:
                         self._nonblock_event.set()
 
     def _send_notify_message_udp(self, zone_notify_info, addrinfo):
-        msg, qid = self._create_notify_message(zone_notify_info.zone_name,
-                                               zone_notify_info.zone_class)
+        msg, qid = self._create_notify_message(
+            Name(zone_notify_info.zone_name),
+            RRClass(zone_notify_info.zone_class))
         render = MessageRenderer()
         render.set_length_limit(512)
         msg.to_wire(render)
@@ -426,17 +478,6 @@ class NotifyOut:
 
         return True
 
-    def _create_rrset_from_db_record(self, record, zone_class):
-        '''Create one rrset from one record of datasource, if the schema of record is changed,
-        This function should be updated first. TODO, the function is copied from xfrout, there
-        should be library for creating one rrset. '''
-        rrtype_ = RRType(record[sqlite3_ds.RR_TYPE_INDEX])
-        rdata_ = Rdata(rrtype_, RRClass(zone_class), " ".join(record[sqlite3_ds.RR_RDATA_INDEX:]))
-        rrset_ = RRset(Name(record[sqlite3_ds.RR_NAME_INDEX]), RRClass(zone_class), \
-                       rrtype_, RRTTL( int(record[sqlite3_ds.RR_TTL_INDEX])))
-        rrset_.add_rdata(rdata_)
-        return rrset_
-
     def _create_notify_message(self, zone_name, zone_class):
         msg = Message(Message.RENDER)
         qid = random.randint(0, 0xFFFF)
@@ -444,14 +485,36 @@ class NotifyOut:
         msg.set_opcode(Opcode.NOTIFY())
         msg.set_rcode(Rcode.NOERROR())
         msg.set_header_flag(Message.HEADERFLAG_AA)
-        question = Question(Name(zone_name), RRClass(zone_class), RRType('SOA'))
-        msg.add_question(question)
-        # Add soa record to answer section
-        soa_record = sqlite3_ds.get_zone_rrset(zone_name, zone_name, 'SOA', self._db_file)
-        rrset_soa = self._create_rrset_from_db_record(soa_record[0], zone_class)
-        msg.add_rrset(Message.SECTION_ANSWER, rrset_soa)
+        msg.add_question(Question(zone_name, zone_class, RRType.SOA()))
+        msg.add_rrset(Message.SECTION_ANSWER, self._get_zone_soa(zone_name,
+                                                                 zone_class))
         return msg, qid
 
+    def _get_zone_soa(self, zone_name, zone_class):
+        # We create (and soon drop) the data source client here because
+        # clients should be thread specific.  We could let the main thread
+        # loop (_dispatcher) create and retain the client in order to avoid
+        # the overhead when we generalize the interface (and we may also
+        # revisit the design of notify_out more substantially anyway).
+        datasrc_config = '{ "database_file": "' + self._db_file + '"}'
+        result, finder = DataSourceClient('sqlite3',
+                                          datasrc_config).find_zone(zone_name)
+        if result is not DataSourceClient.SUCCESS:
+            raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+                                           zone_name.to_text() + '/' +
+                                           zone_class.to_text() + ' not found')
+
+        result, soa_rrset = finder.find(zone_name, RRType.SOA(), None,
+                                        finder.FIND_DEFAULT)
+        if result is not finder.SUCCESS or soa_rrset is None or \
+                soa_rrset.get_rdata_count() != 1:
+            raise NotifyOutDataSourceError('_get_zone_soa: Zone ' +
+                                           zone_name.to_text() + '/' +
+                                           zone_class.to_text() +
+                                           ' is broken: no valid SOA found')
+
+        return soa_rrset
+
     def _handle_notify_reply(self, zone_notify_info, msg_data, from_addr):
         '''Parse the notify reply message.
         rcode will not checked here, If we get the response
diff --git a/src/lib/python/isc/notify/notify_out_messages.mes b/src/lib/python/isc/notify/notify_out_messages.mes
index 570f51e..b77a60c 100644
--- a/src/lib/python/isc/notify/notify_out_messages.mes
+++ b/src/lib/python/isc/notify/notify_out_messages.mes
@@ -81,3 +81,24 @@ programming error, since all exceptions should have been caught
 explicitly. Please file a bug report. Since there was a response,
 no more notifies will be sent to this server for this notification
 event.
+
+% NOTIFY_OUT_DATASRC_ACCESS_FAILURE failed to get access to data source: %1
+notify_out failed to get access to one of the configured data sources.
+The detailed error is shown in the log message.  This can be either a
+configuration error or an installation setup failure.
+
+% NOTIFY_OUT_DATASRC_ZONE_NOT_FOUND Zone %1 is not found
+notify_out attempted to get slave information of a zone, but the zone
+wasn't found in the expected data source.  This shouldn't happen,
+because notify_out first identifies a list of available zones before
+this process.  So this indicates a critical inconsistency in the data
+source or a software bug.
+
+% NOTIFY_OUT_ZONE_NO_NS Zone %1 doesn't have NS RR
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an NS RR.  A notify message won't be sent to such a zone.
+
+% NOTIFY_OUT_ZONE_BAD_SOA Zone %1 is invalid in terms of SOA
+This is a warning issued when the notify_out module finds a zone that
+doesn't have an SOA RR or has multiple SOA RRs.  A notify message won't
+be sent to such a zone.
diff --git a/src/lib/python/isc/notify/tests/Makefile.am b/src/lib/python/isc/notify/tests/Makefile.am
index 00c2eee..6b62b90 100644
--- a/src/lib/python/isc/notify/tests/Makefile.am
+++ b/src/lib/python/isc/notify/tests/Makefile.am
@@ -1,12 +1,20 @@
 PYCOVERAGE_RUN=@PYCOVERAGE_RUN@
 PYTESTS = notify_out_test.py
 EXTRA_DIST = $(PYTESTS)
+EXTRA_DIST += testdata/test.sqlite3 testdata/brokentest.sqlite3
+# The rest of the files are actually not necessary, but added for reference
+EXTRA_DIST += testdata/example.com testdata/example.net
+EXTRA_DIST += testdata/nons.example testdata/nosoa.example
+EXTRA_DIST += testdata/multisoa.example
 
 # If necessary (rare cases), explicitly specify paths to dynamic libraries
 # required by loadable python modules.
 LIBRARY_PATH_PLACEHOLDER =
 if SET_ENV_LIBRARY_PATH
 LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/cryptolink/.libs:$(abs_top_builddir)/src/lib/dns/.libs:$(abs_top_builddir)/src/lib/dns/python/.libs:$(abs_top_builddir)/src/lib/cc/.libs:$(abs_top_builddir)/src/lib/config/.libs:$(abs_top_builddir)/src/lib/log/.libs:$(abs_top_builddir)/src/lib/util/.libs:$(abs_top_builddir)/src/lib/exceptions/.libs:$(abs_top_builddir)/src/lib/datasrc/.libs:$$$(ENV_LIBRARY_PATH)
+else
+# Some systems need the ds path even if not all paths are necessary
+LIBRARY_PATH_PLACEHOLDER += $(ENV_LIBRARY_PATH)=$(abs_top_builddir)/src/lib/datasrc/.libs
 endif
 
 # test using command-line arguments, so use check-local target instead of TESTS
@@ -20,5 +28,6 @@ endif
 	echo Running test: $$pytest ; \
 	PYTHONPATH=$(COMMON_PYTHON_PATH):$(abs_top_builddir)/src/lib/dns/python/.libs \
 	$(LIBRARY_PATH_PLACEHOLDER) \
+	TESTDATASRCDIR=$(abs_top_srcdir)/src/lib/python/isc/notify/tests/testdata/ \
 	$(PYCOVERAGE_RUN) $(abs_srcdir)/$$pytest || exit ; \
 	done
diff --git a/src/lib/python/isc/notify/tests/notify_out_test.py b/src/lib/python/isc/notify/tests/notify_out_test.py
index 83f6d1a..d64c203 100644
--- a/src/lib/python/isc/notify/tests/notify_out_test.py
+++ b/src/lib/python/isc/notify/tests/notify_out_test.py
@@ -19,9 +19,11 @@ import os
 import tempfile
 import time
 import socket
-from isc.datasrc import sqlite3_ds
 from isc.notify import notify_out, SOCK_DATA
 import isc.log
+from isc.dns import *
+
+TESTDATA_SRCDIR = os.getenv("TESTDATASRCDIR")
 
 # our fake socket, where we can read and insert messages
 class MockSocket():
@@ -92,10 +94,8 @@ class TestZoneNotifyInfo(unittest.TestCase):
 
 class TestNotifyOut(unittest.TestCase):
     def setUp(self):
-        self._db_file = tempfile.NamedTemporaryFile(delete=False)
-        sqlite3_ds.load(self._db_file.name, 'example.net.', self._example_net_data_reader)
-        sqlite3_ds.load(self._db_file.name, 'example.com.', self._example_com_data_reader)
-        self._notify = notify_out.NotifyOut(self._db_file.name)
+        self._db_file = TESTDATA_SRCDIR + '/test.sqlite3'
+        self._notify = notify_out.NotifyOut(self._db_file)
         self._notify._notify_infos[('example.com.', 'IN')] = MockZoneNotifyInfo('example.com.', 'IN')
         self._notify._notify_infos[('example.com.', 'CH')] = MockZoneNotifyInfo('example.com.', 'CH')
         self._notify._notify_infos[('example.net.', 'IN')] = MockZoneNotifyInfo('example.net.', 'IN')
@@ -110,10 +110,6 @@ class TestNotifyOut(unittest.TestCase):
         com_ch_info = self._notify._notify_infos[('example.com.', 'CH')]
         com_ch_info.notify_slaves.append(('1.1.1.1', 5353))
 
-    def tearDown(self):
-        self._db_file.close()
-        os.unlink(self._db_file.name)
-
     def test_send_notify(self):
         notify_out._MAX_NOTIFY_NUM = 2
 
@@ -309,39 +305,9 @@ class TestNotifyOut(unittest.TestCase):
         self._notify._zone_notify_handler(example_net_info, notify_out._EVENT_READ)
         self.assertNotEqual(cur_tgt, example_net_info._notify_current)
 
-
-    def _example_net_data_reader(self):
-        zone_data = [
-        ('example.net.',         '1000',  'IN',  'SOA', 'a.dns.example.net. mail.example.net. 1 1 1 1 1'),
-        ('example.net.',         '1000',  'IN',  'NS',  'a.dns.example.net.'),
-        ('example.net.',         '1000',  'IN',  'NS',  'b.dns.example.net.'),
-        ('example.net.',         '1000',  'IN',  'NS',  'c.dns.example.net.'),
-        ('a.dns.example.net.',   '1000',  'IN',  'A',    '1.1.1.1'),
-        ('a.dns.example.net.',   '1000',  'IN',  'AAAA', '2:2::2:2'),
-        ('b.dns.example.net.',   '1000',  'IN',  'A',    '3.3.3.3'),
-        ('b.dns.example.net.',   '1000',  'IN',  'AAAA', '4:4::4:4'),
-        ('b.dns.example.net.',   '1000',  'IN',  'AAAA', '5:5::5:5'),
-        ('c.dns.example.net.',   '1000',  'IN',  'A',    '6.6.6.6'),
-        ('c.dns.example.net.',   '1000',  'IN',  'A',    '7.7.7.7'),
-        ('c.dns.example.net.',   '1000',  'IN',  'AAAA', '8:8::8:8')]
-        for item in zone_data:
-            yield item
-
-    def _example_com_data_reader(self):
-        zone_data = [
-        ('example.com.',         '1000',  'IN',  'SOA', 'a.dns.example.com. mail.example.com. 1 1 1 1 1'),
-        ('example.com.',         '1000',  'IN',  'NS',  'a.dns.example.com.'),
-        ('example.com.',         '1000',  'IN',  'NS',  'b.dns.example.com.'),
-        ('example.com.',         '1000',  'IN',  'NS',  'c.dns.example.com.'),
-        ('a.dns.example.com.',   '1000',  'IN',  'A',    '1.1.1.1'),
-        ('b.dns.example.com.',   '1000',  'IN',  'A',    '3.3.3.3'),
-        ('b.dns.example.com.',   '1000',  'IN',  'AAAA', '4:4::4:4'),
-        ('b.dns.example.com.',   '1000',  'IN',  'AAAA', '5:5::5:5')]
-        for item in zone_data:
-            yield item
-
     def test_get_notify_slaves_from_ns(self):
-        records = self._notify._get_notify_slaves_from_ns('example.net.')
+        records = self._notify._get_notify_slaves_from_ns(Name('example.net.'),
+                                                          RRClass.IN())
         self.assertEqual(6, len(records))
         self.assertEqual('8:8::8:8', records[5])
         self.assertEqual('7.7.7.7', records[4])
@@ -350,14 +316,32 @@ class TestNotifyOut(unittest.TestCase):
         self.assertEqual('4:4::4:4', records[1])
         self.assertEqual('3.3.3.3', records[0])
 
-        records = self._notify._get_notify_slaves_from_ns('example.com.')
+        records = self._notify._get_notify_slaves_from_ns(Name('example.com.'),
+                                                          RRClass.IN())
         self.assertEqual(3, len(records))
         self.assertEqual('5:5::5:5', records[2])
         self.assertEqual('4:4::4:4', records[1])
         self.assertEqual('3.3.3.3', records[0])
 
+    def test_get_notify_slaves_from_ns_unusual(self):
+        self._notify._db_file = TESTDATA_SRCDIR + '/brokentest.sqlite3'
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nons.example'), RRClass.IN()))
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nosoa.example'), RRClass.IN()))
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('multisoa.example'), RRClass.IN()))
+
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('nosuchzone.example'), RRClass.IN()))
+
+        # This will cause failure in getting access to the data source.
+        self._notify._db_file = TESTDATA_SRCDIR + '/nodir/error.sqlite3'
+        self.assertEqual([], self._notify._get_notify_slaves_from_ns(
+                Name('example.com'), RRClass.IN()))
+
     def test_init_notify_out(self):
-        self._notify._init_notify_out(self._db_file.name)
+        self._notify._init_notify_out(self._db_file)
         self.assertListEqual([('3.3.3.3', 53), ('4:4::4:4', 53), ('5:5::5:5', 53)],
                              self._notify._notify_infos[('example.com.', 'IN')].notify_slaves)
 
@@ -417,6 +401,5 @@ class TestNotifyOut(unittest.TestCase):
 
 if __name__== "__main__":
     isc.log.init("bind10")
+    isc.log.resetUnitTestRootLogger()
     unittest.main()
-
-
diff --git a/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3
new file mode 100644
index 0000000..61e766c
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/brokentest.sqlite3 differ
diff --git a/src/lib/python/isc/notify/tests/testdata/example.com b/src/lib/python/isc/notify/tests/testdata/example.com
new file mode 100644
index 0000000..5d59819
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.com
@@ -0,0 +1,10 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.com.         1000  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+example.com.         1000  IN  NS  a.dns.example.com.
+example.com.         1000  IN  NS  b.dns.example.com.
+example.com.         1000  IN  NS  c.dns.example.com.
+a.dns.example.com.   1000  IN  A    1.1.1.1
+b.dns.example.com.   1000  IN  A    3.3.3.3
+b.dns.example.com.   1000  IN  AAAA 4:4::4:4
+b.dns.example.com.   1000  IN  AAAA 5:5::5:5
diff --git a/src/lib/python/isc/notify/tests/testdata/example.net b/src/lib/python/isc/notify/tests/testdata/example.net
new file mode 100644
index 0000000..001d2d9
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/example.net
@@ -0,0 +1,14 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+example.net.         1000  IN  SOA a.dns.example.net. mail.example.net. 1 1 1 1 1
+example.net.         1000  IN  NS  a.dns.example.net.
+example.net.         1000  IN  NS  b.dns.example.net.
+example.net.         1000  IN  NS  c.dns.example.net.
+a.dns.example.net.   1000  IN  A    1.1.1.1
+a.dns.example.net.   1000  IN  AAAA 2:2::2:2
+b.dns.example.net.   1000  IN  A    3.3.3.3
+b.dns.example.net.   1000  IN  AAAA 4:4::4:4
+b.dns.example.net.   1000  IN  AAAA 5:5::5:5
+c.dns.example.net.   1000  IN  A    6.6.6.6
+c.dns.example.net.   1000  IN  A    7.7.7.7
+c.dns.example.net.   1000  IN  AAAA 8:8::8:8
diff --git a/src/lib/python/isc/notify/tests/testdata/multisoa.example b/src/lib/python/isc/notify/tests/testdata/multisoa.example
new file mode 100644
index 0000000..eca2fbd
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/multisoa.example
@@ -0,0 +1,5 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+multisoa.example.         1000  IN  SOA a.dns.multisoa.example. mail.multisoa.example. 1 1 1 1 1
+multisoa.example.         1000  IN  SOA a.dns.multisoa.example. mail.multisoa.example. 2 2 2 2 2
+multisoa.example.         1000  IN  NS  a.dns.multisoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/nons.example b/src/lib/python/isc/notify/tests/testdata/nons.example
new file mode 100644
index 0000000..c1fc1b8
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nons.example
@@ -0,0 +1,3 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+nons.example.         1000  IN  SOA a.dns.nons.example. mail.nons.example. 1 1 1 1 1
diff --git a/src/lib/python/isc/notify/tests/testdata/nosoa.example b/src/lib/python/isc/notify/tests/testdata/nosoa.example
new file mode 100644
index 0000000..18e87e1
--- /dev/null
+++ b/src/lib/python/isc/notify/tests/testdata/nosoa.example
@@ -0,0 +1,7 @@
+;; This is the source of a zone stored in test.sqlite3.  It's provided
+;; for reference purposes only.
+;; (SOA has been removed)
+nosoa.example.         1000  IN  SOA a.dns.example.com. mail.example.com. 1 1 1 1 1
+nosoa.example.         1000  IN  NS  a.dns.nosoa.example.
+nosoa.example.         1000  IN  NS  b.dns.nosoa.example.
+nosoa.example.         1000  IN  NS  c.dns.nosoa.example.
diff --git a/src/lib/python/isc/notify/tests/testdata/test.sqlite3 b/src/lib/python/isc/notify/tests/testdata/test.sqlite3
new file mode 100644
index 0000000..e3cadb0
Binary files /dev/null and b/src/lib/python/isc/notify/tests/testdata/test.sqlite3 differ




More information about the bind10-changes mailing list